diff --git a/go.mod b/go.mod
index 0609416b35b..4f6c0df6266 100644
--- a/go.mod
+++ b/go.mod
@@ -15,11 +15,11 @@ require (
 	github.com/alvaroloes/enumer v1.1.2
 	github.com/apparentlymart/go-cidr v1.1.0
 	github.com/codahale/etm v0.0.0-20141003032925-c00c9e6fb4c9
-	github.com/containers/image/v5 v5.22.0
-	github.com/containers/podman/v4 v4.1.1
+	github.com/containers/image/v5 v5.24.1
+	github.com/containers/podman/v4 v4.4.2
 	github.com/coreos/go-oidc v2.2.1+incompatible
 	github.com/coreos/go-semver v0.3.0
-	github.com/coreos/go-systemd/v22 v22.3.2
+	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/coreos/ignition v0.35.0
 	github.com/davecgh/go-spew v1.1.1
 	github.com/form3tech-oss/jwt-go v3.2.5+incompatible
@@ -27,7 +27,7 @@ require (
 	github.com/go-bindata/go-bindata v3.1.2+incompatible
 	github.com/go-chi/chi/v5 v5.0.8
 	github.com/go-logr/logr v1.2.4
-	github.com/go-test/deep v1.0.8
+	github.com/go-test/deep v1.1.0
 	github.com/gofrs/uuid v4.2.0+incompatible
 	github.com/golang-jwt/jwt/v4 v4.5.0
 	github.com/golang/mock v1.6.0
@@ -41,10 +41,10 @@ require (
 	github.com/jewzaam/go-cosmosdb v0.0.0-20220315232836-282b67c5b234
 	github.com/jongio/azidext/go/azidext v0.5.0
 	github.com/microsoftgraph/msgraph-sdk-go v1.4.0
-	github.com/onsi/ginkgo/v2 v2.3.1
-	github.com/onsi/gomega v1.22.0
+	github.com/onsi/ginkgo/v2 v2.7.0
+	github.com/onsi/gomega v1.26.0
 	github.com/open-policy-agent/frameworks/constraint v0.0.0-20221109005544-7de84dff5081
-	github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab
+	github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb
 	github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible
 	github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a
 	github.com/openshift/console-operator v0.0.0-20220407014945-45d37e70e0c2
@@ -65,7 +65,7 @@ require (
 	github.com/vincent-petithory/dataurl v1.0.0
 	golang.org/x/crypto v0.12.0
 	golang.org/x/net v0.14.0
-	golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
+	golang.org/x/oauth2 v0.4.0
 	golang.org/x/sync v0.1.0
 	golang.org/x/text v0.12.0
 	golang.org/x/tools v0.6.0
@@ -89,61 +89,69 @@ require (
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect
-	github.com/BurntSushi/toml v1.2.0 // indirect
+	github.com/BurntSushi/toml v1.2.1 // indirect
 	github.com/MakeNowJust/heredoc v1.0.0 // indirect
-	github.com/Microsoft/go-winio v0.5.2 // indirect
+	github.com/Microsoft/go-winio v0.6.0 // indirect
 	github.com/Microsoft/hcsshim v0.9.6 // indirect
-	github.com/PuerkitoBio/purell v1.1.1 // indirect
-	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e // indirect
 	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver v3.5.1+incompatible // indirect
+	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
-	github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
+	github.com/chzyer/readline v1.5.1 // indirect
 	github.com/cilium/ebpf v0.7.0 // indirect
 	github.com/cjlapao/common-go v0.0.39 // indirect
+	github.com/container-orchestrated-devices/container-device-interface v0.5.3 // indirect
 	github.com/containerd/cgroups v1.0.4 // indirect
 	github.com/containerd/containerd v1.6.18 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
-	github.com/containers/buildah v1.27.1 // indirect
-	github.com/containers/common v0.49.1 // indirect
-	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
-	github.com/containers/ocicrypt v1.1.5 // indirect
-	github.com/containers/psgo v1.7.2 // indirect
-	github.com/containers/storage v1.42.0 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
+	github.com/containers/buildah v1.29.0 // indirect
+	github.com/containers/common v0.51.0 // indirect
+	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
+	github.com/containers/ocicrypt v1.1.7 // indirect
+	github.com/containers/psgo v1.8.0 // indirect
+	github.com/containers/storage v1.45.3 // indirect
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
 	github.com/creack/pty v1.1.17 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
 	github.com/docker/distribution v2.8.2+incompatible // indirect
 	github.com/docker/docker v20.10.24+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.6.4 // indirect
+	github.com/docker/docker-credential-helpers v0.7.0 // indirect
 	github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
-	github.com/docker/go-metrics v0.0.1 // indirect
-	github.com/docker/go-units v0.4.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
 	github.com/fatih/color v1.14.1 // indirect
-	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/go-errors/errors v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/analysis v0.21.4 // indirect
+	github.com/go-openapi/errors v0.20.3 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
-	github.com/go-openapi/jsonreference v0.19.6 // indirect
-	github.com/go-openapi/swag v0.21.1 // indirect
+	github.com/go-openapi/jsonreference v0.20.0 // indirect
+	github.com/go-openapi/loads v0.21.2 // indirect
+	github.com/go-openapi/runtime v0.24.1 // indirect
+	github.com/go-openapi/spec v0.20.7 // indirect
+	github.com/go-openapi/strfmt v0.21.3 // indirect
+	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-openapi/validate v0.22.0 // indirect
+	github.com/go-stack/stack v1.8.1 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
 	github.com/gobuffalo/flect v0.2.5 // indirect
-	github.com/godbus/dbus/v5 v5.1.0 // indirect
+	github.com/godbus/dbus/v5 v5.1.1-0.20221029134443-4b691ce883d5 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/btree v1.0.1 // indirect
 	github.com/google/cel-go v0.10.2 // indirect
-	github.com/google/go-containerregistry v0.10.0 // indirect
+	github.com/google/go-containerregistry v0.12.1 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
@@ -154,16 +162,17 @@ require (
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/imdario/mergo v0.3.13 // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/itchyny/timefmt-go v0.1.5 // indirect
 	github.com/jinzhu/copier v0.3.5 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.15.9 // indirect
-	github.com/klauspost/pgzip v1.2.5 // indirect
+	github.com/klauspost/compress v1.15.15 // indirect
+	github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
+	github.com/kr/fs v0.1.0 // indirect
 	github.com/kr/pretty v0.3.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
+	github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
@@ -180,7 +189,7 @@ require (
 	github.com/microsoft/kiota-serialization-text-go v1.0.0 // indirect
 	github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
-	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
+	github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
@@ -190,56 +199,61 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
+	github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
 	github.com/opencontainers/runc v1.1.5 // indirect
-	github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7 // indirect
-	github.com/opencontainers/selinux v1.10.1 // indirect
+	github.com/opencontainers/runtime-tools v0.9.1-0.20221014010322-58c91d646d86 // indirect
+	github.com/opencontainers/selinux v1.10.2 // indirect
 	github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
+	github.com/pkg/sftp v1.13.5 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/pquerna/cachecontrol v0.1.0 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/robfig/cron v1.2.0 // indirect
 	github.com/russross/blackfriday v1.6.0 // indirect
-	github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 // indirect
-	github.com/spf13/cobra v1.5.0 // indirect
+	github.com/sigstore/fulcio v1.0.0 // indirect
+	github.com/sigstore/rekor v1.0.1 // indirect
+	github.com/sigstore/sigstore v1.5.1 // indirect
+	github.com/spf13/cobra v1.6.1 // indirect
 	github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
 	github.com/stoewer/go-strcase v1.2.0 // indirect
-	github.com/sylabs/sif/v2 v2.8.1 // indirect
+	github.com/sylabs/sif/v2 v2.9.0 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
-	github.com/theupdateframework/go-tuf v0.3.2 // indirect
+	github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
-	github.com/ulikunitz/xz v0.5.10 // indirect
+	github.com/ulikunitz/xz v0.5.11 // indirect
 	github.com/vbatts/tar-split v0.11.2 // indirect
-	github.com/vbauerster/mpb/v7 v7.4.2 // indirect
+	github.com/vbauerster/mpb/v7 v7.5.3 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	github.com/xlab/treeprint v1.1.0 // indirect
 	github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
+	go.mongodb.org/mongo-driver v1.11.1 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
-	go.opencensus.io v0.23.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/otel v1.15.1 // indirect
 	go.opentelemetry.io/otel/trace v1.15.1 // indirect
 	go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect
 	golang.org/x/mod v0.8.0 // indirect
 	golang.org/x/sys v0.11.0 // indirect
 	golang.org/x/term v0.11.0 // indirect
-	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
+	golang.org/x/time v0.2.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
-	google.golang.org/grpc v1.49.0 // indirect
+	google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
+	google.golang.org/grpc v1.51.0 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
@@ -697,7 +711,6 @@ replace (
 	github.com/clarketm/json => github.com/clarketm/json v1.15.7 // Later versions not compatible with Go 1.16
 	github.com/cockroachdb/sentry-go => github.com/getsentry/sentry-go v0.11.0
 	github.com/docker/spdystream => github.com/docker/spdystream v0.1.0
-	github.com/go-openapi/spec => github.com/go-openapi/spec v0.19.8
 	// Replace old GoGo Protobuf versions https://nvd.nist.gov/vuln/detail/CVE-2021-3121
 	github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2
 	github.com/mrnold/go-libnbd => github.com/mrnold/go-libnbd v1.4.1-cdi // v1.10.0 uses an invalid module path
diff --git a/go.sum b/go.sum
index f9e5789fc3c..c94ffb4b5ce 100644
--- a/go.sum
+++ b/go.sum
@@ -1,26 +1,14 @@
-4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
 bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
 bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM=
-bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
 bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
 cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w=
-cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
-github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
-github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
-github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
 github.com/Azure/azure-sdk-for-go v63.1.0+incompatible h1:yNC7qlSUWVF8p0TzxdmWW1FJ3DdIA+0Pge41IU/2+9U=
 github.com/Azure/azure-sdk-for-go v63.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 h1:/iHxaJhsFr0+xVFfbMr5vxz848jyiWuIEDhYq3y5odY=
@@ -60,7 +48,6 @@ github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e h1:4ZrkT/RzpnRO
 github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e/go.mod h1:uw9h2sd4WWHOPdJ13MQpwK5qYWKYDumDqxWWIknEQ+k=
 github.com/ClickHouse/clickhouse-go v1.4.9/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
 github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
-github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
 github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
 github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
 github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw=
@@ -70,13 +57,8 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go
 github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
 github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
 github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
 github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
-github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
 github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
 github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
 github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
@@ -89,34 +71,28 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX
 github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
+github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
 github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
 github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
 github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=
 github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
-github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
@@ -134,17 +110,12 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
 github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
-github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
 github.com/alvaroloes/enumer v1.1.2 h1:5khqHB33TZy1GWCO/lZwcroBFh7u+0j40T83VUbfAMY=
 github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT0m65DJ+Wo=
-github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
-github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e h1:GCzyKMDDjSGnlpl3clrdAK7I1AaVoaiKDOYkUzChZzg=
 github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
-github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
 github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
@@ -156,18 +127,14 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY
 github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/ashanbrown/forbidigo v1.2.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI=
-github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU=
 github.com/auth0/go-jwt-middleware v1.0.1/go.mod h1:YSeUX3z6+TF2H+7padiEqNJ73Zy9vXW72U//IgN0BIM=
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
-github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
@@ -182,29 +149,23 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCS
 github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
-github.com/blizzy78/varnamelen v0.3.0/go.mod h1:hbwRdBvoBqxk34XyQ6HA0UH3G0/1TKuv5AC4eaBT0Ec=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
-github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE=
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/bugsnag-go v1.5.3/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
-github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
 github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
@@ -215,18 +176,17 @@ github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cb
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
-github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
-github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
-github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0/go.mod h1:67kWC1PXQLR3lM/mmNnu3Kzn7K4TSWZAGUuQP1JSngk=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/checkpoint-restore/go-criu/v5 v5.2.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
-github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
+github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
+github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
 github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
@@ -246,13 +206,13 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
 github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/codahale/etm v0.0.0-20141003032925-c00c9e6fb4c9 h1:88tJLy+/ao5kPBv1EtNyduXeWrTHV47seJPgI7pWgDs=
 github.com/codahale/etm v0.0.0-20141003032925-c00c9e6fb4c9/go.mod h1:jy75q4Q7stkoOx8bCRnIm0t1Vh6Pt4OJvcwA9+oJsqI=
-github.com/container-orchestrated-devices/container-device-interface v0.4.0/go.mod h1:E1zcucIkq9P3eyNmY+68dBQsTcsXJh9cgRo2IVNScKQ=
+github.com/container-orchestrated-devices/container-device-interface v0.5.3 h1:4v6FMaa1Pn8SS0IBwgsvCsno8HRXoQvI87Uj1Zu7Tw4=
+github.com/container-orchestrated-devices/container-device-interface v0.5.3/go.mod h1:SQohok453ewi9dItvUcO0MrP7K1CEQTxPDNd7OV+nxI=
 github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
 github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
 github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
@@ -260,7 +220,6 @@ github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqh
 github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
 github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
 github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
 github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
 github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
@@ -269,9 +228,6 @@ github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8a
 github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
 github.com/containerd/containerd v1.4.11/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
-github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
-github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig=
-github.com/containerd/containerd v1.6.4/go.mod h1:oWOqbuJUZmOVafhA0lj2NAXbiO1u7F0K5l1bUgdyo94=
 github.com/containerd/containerd v1.6.18 h1:qZbsLvmyu+Vlty0/Ex5xc0z2YtKpIsb5n45mAMI+2Ns=
 github.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -279,27 +235,19 @@ github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq
 github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
 github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
 github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
 github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
-github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
-github.com/containerd/go-cni v1.1.4/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
-github.com/containerd/go-cni v1.1.5/go.mod h1:Rf2ZrMycr1El589IyuRzn7RkfdRZVKaFGaxSDHVAjj0=
 github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
 github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
 github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
 github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
-github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
-github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
 github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
-github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
-github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM=
 github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
+github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw=
+github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
@@ -311,38 +259,25 @@ github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNR
 github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v1.0.0/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
-github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
-github.com/containernetworking/cni v1.1.0/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
 github.com/containernetworking/plugins v1.0.0/go.mod h1:liDVn61uqF5YCAh8W4VNt2cXb8h20RjRQqsRfiZIRaI=
-github.com/containers/buildah v1.26.1/go.mod h1:CsWSG8OpJd8v3mlLREJzVAOBgC93DjRNALUVHoi8QsY=
-github.com/containers/buildah v1.27.1 h1:i5yP3uJBq9mKANOP4WA+5x9cBuEQ4FJIAzEPoPzRrXQ=
-github.com/containers/buildah v1.27.1/go.mod h1:anH3ExvDXRNP9zLQCrOc1vWb5CrhqLF/aYFim4tslvA=
-github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
-github.com/containers/common v0.49.1 h1:6y4/s2WwYxrv+Cox7fotOo316wuZI+iKKPUQweCYv50=
-github.com/containers/common v0.49.1/go.mod h1:ueM5hT0itKqCQvVJDs+EtjornAQtrHYxQJzP2gxeGIg=
-github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw=
-github.com/containers/image/v5 v5.22.0 h1:KemxPmD4D2YYOFZN2SgoTk7nBFcnwPiPW0MqjYtknSE=
-github.com/containers/image/v5 v5.22.0/go.mod h1:D8Ksv2RNB8qLJ7xe1P3rgJJOSQpahA6amv2Ax++/YO4=
-github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
-github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
+github.com/containers/buildah v1.29.0 h1:rA3S2SXJffrJjvY2kyxOsAaIseDY6Ib77FsD7n88Mj4=
+github.com/containers/buildah v1.29.0/go.mod h1:mah+CGmpOjkBJJ5rhOP0M2ETnODhiuhtnXusfh0hc6Q=
+github.com/containers/common v0.51.0 h1:Ax4YHNTG8cEPHZJcMYRoP7sfBgOISceeyOvmZzmSucg=
+github.com/containers/common v0.51.0/go.mod h1:3W2WIdalgQfrsX/T5tjX+6CxgT3ThJVN2G9sNuFjuCM=
+github.com/containers/image/v5 v5.24.1 h1:XaRw3FJmvZtI297uBVTJluUVH4AQJ//YpHviaOw0C4M=
+github.com/containers/image/v5 v5.24.1/go.mod h1:oss5F6ssGQz8ZtC79oY+fuzYA3m3zBek9tq9gmhuvHc=
+github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
+github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
-github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
-github.com/containers/ocicrypt v1.1.4/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
-github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ=
-github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
-github.com/containers/podman/v4 v4.1.1 h1:ulT4QEn49K7ApsP9vtl0PZp/gfipuYEdqcUbA6r7mpw=
-github.com/containers/podman/v4 v4.1.1/go.mod h1:ZgZCaL1EAnRXPbCUVQ3P24UZ+uGAGUTXLysvEBwpmkY=
-github.com/containers/psgo v1.7.2 h1:WbCvsY9w+nCv3j4der0mbD3PSRUv/W8l+G0YrZrdSDc=
-github.com/containers/psgo v1.7.2/go.mod h1:SLpqxsPOHtTqRygjutCPXmeU2PoEFzV3gzJplN4BMx0=
-github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
-github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
-github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
-github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
-github.com/containers/storage v1.42.0 h1:zm2AQD4NDeTB3JQ8X+Wo5+VRqNB+b4ocEd7Qj6ylPJA=
-github.com/containers/storage v1.42.0/go.mod h1:JiUJwOgOo1dr2DdOUc1MRe2GCAXABYoYmOdPF8yvH78=
+github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U=
+github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8NscCYRawuDNtw=
+github.com/containers/podman/v4 v4.4.2 h1:OSlO6NZ3lQuHOhb+MhW0Rl8IU7W1SlGK/0jh+utLqYU=
+github.com/containers/podman/v4 v4.4.2/go.mod h1:q7uhwIw4/69ExGUjNf1Bum3NHGT3yInyC3TJWvnFBeI=
+github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
+github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
+github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
+github.com/containers/storage v1.45.3 h1:GbtTvTtp3GW2/tcFg5VhgHXcYMwVn2KfZKiHjf9FAOM=
+github.com/containers/storage v1.45.3/go.mod h1:OdRUYHrq1HP6iAo79VxqtYuJzC5j4eA2I60jKOoCT7g=
 github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
 github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -350,23 +285,19 @@ github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFE
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk=
 github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/ignition v0.35.0 h1:UFodoYq1mOPrbEjtxIsZbThcDyQwAI1owczRDqWmKkQ=
 github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/stream-metadata-go v0.1.3/go.mod h1:zxVoWUDB0H8+tZRhTs0LeLeR/QdmBsuo7FN1oOBrWTE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -376,6 +307,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI=
+github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
 github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
 github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
@@ -393,27 +326,22 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
 github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E=
-github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
 github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
 github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU=
 github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ=
 github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
 github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8=
 github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA=
-github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE=
 github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
-github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA=
 github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
 github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
-github.com/digitalocean/go-libvirt v0.0.0-20201209184759-e2a69bcd5bd1/go.mod h1:QS1XzqZLcDniNYrN7EZefq3wIyb/M2WmJbql4ZKoc1Q=
-github.com/digitalocean/go-qemu v0.0.0-20210326154740-ac9e0b687001/go.mod h1:IetBE52JfFxK46p2n2Rqm+p5Gx1gpu2hRHsrbnPOWZQ=
 github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
@@ -421,7 +349,6 @@ github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop
 github.com/docker/cli v20.10.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v20.10.12+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
 github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -430,32 +357,27 @@ github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r
 github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v17.12.1-ce+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE=
 github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
-github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
 github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
+github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
 github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dtylman/scp v0.0.0-20181017070807-f3000a34aef4/go.mod h1:jN1ZaUPSNA8jm10nmaRLky84qV/iCeiHmcEf3EbP+dc=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
@@ -470,10 +392,7 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
 github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
-github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
 github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
 github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -489,17 +408,14 @@ github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSY
 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
 github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
 github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
-github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc=
 github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
 github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
 github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
-github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@@ -514,14 +430,10 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
-github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
-github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
 github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
-github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y=
-github.com/fsouza/go-dockerclient v1.7.11/go.mod h1:zvYxutUNOK853i1s7VywZxQgxSHbm7A6en/q9MHBN6k=
-github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
 github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
-github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
@@ -536,7 +448,6 @@ github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy
 github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I=
 github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0=
 github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
-github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJuiqgom5DsQxM=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
 github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
 github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
@@ -559,53 +470,66 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
 github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
 github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
-github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
+github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
+github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
+github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc=
+github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
 github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
 github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
+github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
+github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
+github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
+github.com/go-openapi/runtime v0.24.1 h1:Sml5cgQKGYQHF+M7yYSHaH1eOjvTykrddTE/KtQVjqo=
+github.com/go-openapi/runtime v0.24.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk=
+github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
+github.com/go-openapi/spec
v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI= +github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= +github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.5/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= -github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= -github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo 
v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= @@ -623,11 +547,10 @@ github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.1-0.20221029134443-4b691ce883d5 h1:boOtwyhKoC3Aadiw5zbhU54YyCkm9EpZCSN6mOx0KLc= +github.com/godbus/dbus/v5 v5.1.1-0.20221029134443-4b691ce883d5/go.mod h1:fXoNnqaUvdKqjJmMGeiBgmRphUg+kO0MT4AhPOP6+Qg= github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg= github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -645,18 +568,14 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.4.3/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= @@ -671,16 +590,6 @@ github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.43.0/go.mod h1:VIFlUqidx5ggxDfQagdvd9E67UjMXtTHBkBQ7sHoC5Q= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -692,7 +601,6 @@ github.com/google/cel-go v0.10.2 h1:fJtfqBC/zg/+M0W32IemohwB6u5oFWv1iVGNpgUxan0= github.com/google/cel-go v0.10.2/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= github.com/google/gnostic v0.5.5 h1:xaJtlbPCF2oT4Aidl/Al5W6lRq7g5+biHTihznoaa7k= github.com/google/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -707,13 +615,12 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp 
v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-containerregistry v0.8.0/go.mod h1:wW5v71NHGnQyb4k+gSshjxidrC7lN33MdWEn+Mz9TsI= -github.com/google/go-containerregistry v0.10.0 h1:qd/fv2nQajGZJenaNcdaghlwSPjQ0NphN9hzArr2WWg= -github.com/google/go-containerregistry v0.10.0/go.mod h1:C7uwbB1QUAtvnknyd3ethxJRd4gtEjU/9WLXzckfI1Y= +github.com/google/go-containerregistry v0.12.1 h1:W1mzdNUTx4Zla4JaixCRLhORcR7G6KxE5hHl5fkPsp8= +github.com/google/go-containerregistry v0.12.1/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github/v27 v27.0.4/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= @@ -723,24 +630,18 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/renameio v1.0.0/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -748,21 +649,15 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/csrf v1.7.1 h1:Ir3o2c1/Uzj6FBxMlAUB6SivgVMy1ONXwYgXn+/aHPE= github.com/gorilla/csrf v1.7.1/go.mod h1:+a/4tCmqhG6/w4oafeAZ9pEa3/NZOWYVbD9fV0FwIQA= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -776,33 +671,15 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= 
-github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= -github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= -github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= -github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-health-probe v0.3.2/go.mod h1:izVOQ4RWbjUR6lm4nn+VLJyQ+FyaiGmprEYgI04Gs7U= github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= @@ -827,11 +704,9 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod 
h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -839,33 +714,26 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/heketi/heketi v10.3.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= -github.com/honeycombio/beeline-go v1.1.1 h1:sU8r4ae34uEL3/CguSl8Mr+Asz9DL1nfH9Wwk85Pc7U= -github.com/honeycombio/libhoney-go v1.15.2 h1:5NGcjOxZZma13dmzNcl3OtGbF1hECA0XHJNHEb2t2ck= +github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc= +github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis= github.com/iancoleman/strcase v0.1.2/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/insomniacslk/dhcp v0.0.0-20220119180841-3c283ff8b7dd/go.mod h1:h+MxyHxRg9NH3terB1nfRIUaQEcI0XOVkdR9LNBlp8E= -github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= -github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/itchyny/gojq v0.12.13 h1:IxyYlHYIlspQHHTE0f3cJF0NKDMfajxViuhBLnHd/QU= 
github.com/itchyny/gojq v0.12.13/go.mod h1:JzwzAqenfhrPUuwbmEz3nu3JQmFLlQTQMUcOdnu/Sf4= github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE= @@ -878,12 +746,8 @@ github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJS github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jewzaam/go-cosmosdb v0.0.0-20220315232836-282b67c5b234 h1:R0Hokq55Hv3SpbXyfZRh8vIrKFAPZ+SQqyTsRc7J/4E= github.com/jewzaam/go-cosmosdb v0.0.0-20220315232836-282b67c5b234/go.mod h1:kZxm8EB19+pd3nT92t0aQblXb7K4sHG8cp59cleqdNc= -github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= @@ -897,18 +761,12 @@ github.com/joelanford/ignore v0.0.0-20210607151042-0d25dc18b62d/go.mod h1:7HQupe github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jongio/azidext/go/azidext v0.5.0 h1:uPInXD4NZ3J0k79FPwIA0YXknFn+WcqZqSgs3/jPgvQ= github.com/jongio/azidext/go/azidext v0.5.0/go.mod h1:TVRX/hJhzbsCKaOIzicH6a8IvOH0hpjWk/JwZZgtXeU= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= -github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= -github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= -github.com/jsimonetti/rtnetlink v0.0.0-20201110080708-d2c240429e6c/go.mod h1:huN4d1phzjhlOsNIjFsw2SVRbwIHj3fJDMEU2SDPTmg= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -916,40 +774,35 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.11/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/kisom/goutils v1.1.0/go.mod h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= +github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/pgzip v1.2.5/go.mod 
h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0= +github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -962,27 +815,18 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= -github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= -github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e h1:1aV3EJ4ZMsc63MFU4rB+ccSEhZvvVD71T9RA4Rqd3hI= -github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e/go.mod h1:Bl3mfF2LHYepsU2XfzMceIglyByfPe1IFAXtO+p37Qk= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8= +github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= 
github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= @@ -990,9 +834,7 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1003,21 +845,16 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= @@ -1027,11 +864,8 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19 
h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= @@ -1044,22 +878,11 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7/go.mod h1:U6ZQobyTjI/tJyq2HG+i/dfSoFUt8/aZCM+GKtmFk/Y= -github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= -github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= -github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= -github.com/mdlayher/raw v0.0.0-20190606142536-fef19f00fc18/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= -github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= -github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy3EhK+0= github.com/microsoft/kiota-abstractions-go v1.0.0 h1:teQS3yOmcTyps+O48AD17LI8TR1B3wCEwGFcwC6K75c= github.com/microsoft/kiota-abstractions-go v1.0.0/go.mod h1:2yaRQnx2KU7UaenYSApiTT4pf7fFkPV0B71Rm2uYynQ= github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk= @@ -1077,22 +900,20 @@ github.com/microsoftgraph/msgraph-sdk-go v1.4.0/go.mod h1:JIDL1xENx92B60NjO2ACyq github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 
h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY= github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE= github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14= +github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.1.1/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMFB9XyNgFNmRkZZU4= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -1102,6 +923,7 @@ github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/z github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -1113,21 +935,15 @@ github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hx github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= -github.com/moby/sys/mountinfo 
v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= -github.com/moby/vpnkit v0.5.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1136,16 +952,12 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1154,10 +966,7 @@ github.com/mvdan/xurls 
v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq4 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= @@ -1166,23 +975,18 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1202,9 +1006,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.3.1 h1:8SbseP7qM32WcvE6VaN6vfXxv698izmsJ1UQX9ve7T8= -github.com/onsi/ginkgo/v2 v2.3.1/go.mod h1:Sv4yQXwG5VmF7tm3Q5Z+RWUpPo24LF1mpnz2crUb8Ys= +github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= +github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1216,12 +1019,10 @@ github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.22.0 h1:AIg2/OntwkBiCg5Tt1ayyiF1ArFrWFoCSMtMi/wdApk= -github.com/onsi/gomega v1.22.0/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= +github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-cluster-management/api v0.0.0-20210527013639-a6845f2ebcb1/go.mod h1:ot+A1DWq+v1IV+e1S7nhIteYAmNByFgtazvzpoeAfRQ= github.com/open-policy-agent/frameworks/constraint v0.0.0-20221109005544-7de84dff5081 h1:LcxhUNtgAf1dvHIDfOI/sO0LQALSxQCBHxTZ/5CrBxo= @@ -1234,37 +1035,28 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4= -github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg= -github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q= 
+github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= -github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= -github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20201121164853-7413a7f753e1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab h1:YQZXa3elcHgKXAa2GjVFC9M3JeP7ZPyFD1YByDx/dgQ= -github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20190417131837-cd1349b7c47e/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f/go.mod h1:/tgP02fPXGHkU3/qKK1Y0Db4yqNyGm03vLq/mzHzcS4= -github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7 h1:Rf+QsQGxrYCia8mVyOPnoQZ+vJkZGL+ESWBDUM5s9cQ= -github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7/go.mod h1:/tgP02fPXGHkU3/qKK1Y0Db4yqNyGm03vLq/mzHzcS4= +github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb h1:1xSVPOd7/UA+39/hXEGnBJ13p6JFB0E1EvQFlrRDOXI= +github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.9.1-0.20221014010322-58c91d646d86 h1:AaK4/fBxOmEFtb1bs/7KrJsQIgVPnhIrtgJ92RaqM60= +github.com/opencontainers/runtime-tools v0.9.1-0.20221014010322-58c91d646d86/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.0/go.mod 
h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4= +github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ= github.com/openshift/api v0.0.0-20220124143425-d74727069f6f h1:iOTv1WudhVm2UsoST+L+ZrA5A9w57h9vmQsdlBuqG6g= github.com/openshift/api v0.0.0-20220124143425-d74727069f6f/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= github.com/openshift/build-machinery-go v0.0.0-20210115170933-e575b44a7a94/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= @@ -1279,7 +1071,6 @@ github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/openshift/hive/apis v0.0.0-20230811220652-70b666ec89b0 h1:ATjjHF7IbYrPMrxpjVd0b76/zQztTMN1Dn7Qzs4rGJE= github.com/openshift/hive/apis v0.0.0-20230811220652-70b666ec89b0/go.mod h1:VIxA5HhvBmsqVn7aUVQYs004B9K4U5A+HrFwvRq2nK8= -github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= github.com/openshift/library-go v0.0.0-20220303081124-fb4e7a2872f0 h1:hiwAdZ5ishMe4qtUejv+CuBWra18cjZMHVFlVPOZnw0= github.com/openshift/library-go v0.0.0-20220303081124-fb4e7a2872f0/go.mod h1:6AmNM4N4nHftckybV/U7bQW+5AvK5TW81ndSI6KEidw= github.com/openshift/machine-api-operator v0.2.1-0.20220124104622-668c5b52b104/go.mod h1:1j0Au43h8Sn2B81FxOudqcmKnzvMNEH+vfg5y1g2xAk= @@ -1289,6 +1080,7 @@ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= @@ -1324,7 +1116,6 @@ github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1332,7 +1123,6 @@ github.com/pires/go-proxyproto v0.6.2 h1:KAZ7UteSOt6urjme6ZldyFm4wDe/z0ZUP0Yv0Do 
github.com/pires/go-proxyproto v0.6.2/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1341,15 +1131,14 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= +github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= @@ -1364,7 +1153,6 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= @@ -1372,15 +1160,15 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= @@ -1397,16 +1185,6 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.13/go.mod h1:Ul8wwdqR6kBVOCt2dipDBkE+T6vAV/iixkrKuRTN1oQ= -github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.10/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= @@ -1422,50 +1200,40 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.2/go.mod 
h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rootless-containers/rootlesskit v1.0.1/go.mod h1:t2UAiYagxrJ+wmpFAUIZPcqsm4k2B7ve6g7lILKbloc= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= -github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= -github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc= github.com/serge1peshcoff/selenium-go-conditions v0.0.0-20170824121757-5afbdb74596b h1:jLwzNAxsHzKw5sHju7bUk0iQSynZxWAOtnXD5d37Vto= github.com/serge1peshcoff/selenium-go-conditions v0.0.0-20170824121757-5afbdb74596b/go.mod h1:noHZFMVoy0oY+ICCojiGUgv+/ecK+1M6huoUVWAIJoU= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff 
v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 h1:5TPCWtlOsaCiuAaglfZX7obd+/kuE8lGUhsVQzmQSaI= -github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1/go.mod h1:y83NePRM98MJpbGgBgi54UZduhG0aD7lYngAVCx+i/E= +github.com/sigstore/fulcio v1.0.0 h1:hBZW6qg9GXTtCX8jOg1hmyjYLrmsEKZGeMwAbW3XNEg= +github.com/sigstore/fulcio v1.0.0/go.mod h1:j4MzLxX/Be0rHYh3JF2dgMorkWGzEMHBqIHwFU8I/Rw= +github.com/sigstore/rekor v1.0.1 h1:rcESXSNkAPRWFYZel9rarspdvneET60F2ngNkadi89c= +github.com/sigstore/rekor v1.0.1/go.mod h1:ecTKdZWGWqE1pl3U1m1JebQJLU/hSjD9vYHOmHQ7w4g= +github.com/sigstore/sigstore v1.5.1 h1:iUou0QJW8eQKMUkTXbFyof9ZOblDtfaW2Sn2+QI8Tcs= +github.com/sigstore/sigstore v1.5.1/go.mod h1:3i6UTWVNtFwOtbgG63FZZNID4vO9KcO8AszIJlaNI8k= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1476,16 +1244,12 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= @@ -1493,16 +1257,14 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= -github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= @@ -1517,8 +1279,6 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1527,85 +1287,60 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod 
h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ= -github.com/sylabs/sif/v2 v2.8.1 h1:whr4Vz12RXfLnYyVGHoD/rD/hbF2g9OW7BJHa+WIqW8= -github.com/sylabs/sif/v2 v2.8.1/go.mod h1:LQOdYXC9a8i7BleTKRw9lohi0rTbXkJOeS9u0ebvgyM= -github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= +github.com/sylabs/sif/v2 v2.9.0 h1:q9K92j1QW4/QLOtKh9YZpJHrXav6x15AVhQGPVLcg+4= +github.com/sylabs/sif/v2 v2.9.0/go.mod h1:bRdFzcqif0eDjwx0isG4cgTFoKTQn/vfBXVSoP2rB2Y= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tebeka/selenium v0.9.9 h1:cNziB+etNgyH/7KlNI7RMC1ua5aH1+5wUlFQyzeMh+w= github.com/tebeka/selenium v0.9.9/go.mod h1:5Fr8+pUvU6B1OiPfkdCKdXZyr5znvVkxuPd0NOdZCQc= -github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= -github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/theupdateframework/go-tuf v0.3.2 h1:fzjeUUEL3zZpKgWMn35OoksvuCMVMS4ZZ4o2ZcbUGOU= -github.com/theupdateframework/go-tuf v0.3.2/go.mod h1:kfQTX2LhNeK/cp03OBiZHoua0Pp2l9w4ShRwFtc0oKg= +github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5 h1:s+Yvt6bzRwHljSE7j6DLBDcfpZEdBhrvLgOUmd8f7ZM= +github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5/go.mod h1:Le8NAjvDJK1vmLgpVYr4AR1Tqam/b/mTdQyTy37UJDA= github.com/thoas/go-funk v0.8.0/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/u-root/uio v0.0.0-20210528114334-82958018845c/go.mod h1:LpEX5FO/cB+WF4TYGY1V5qktpaZLkKkSegbr0V4eYXA= -github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/ugorji/go v1.2.0/go.mod h1:1ny++pKMXhLWrwWV5Nf+CbOuZJhMoaFD+0GMFfd8fEc= github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go/codec v1.2.0/go.mod h1:dXvG35r7zTX6QImXOSFhGMmKtX+wJ7VTWzGvYQGIjBs= github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= -github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.5.1/go.mod h1:oDzoM7pVwz6wHn5ogWgFUU1s4VJayeQS+aEZDqXIEJs= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= -github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= -github.com/vbauerster/mpb/v7 v7.4.1/go.mod h1:Ygg2mV9Vj9sQBWqsK2m2pidcf9H3s6bNKtqd3/M4gBo= -github.com/vbauerster/mpb/v7 v7.4.2 h1:n917F4d8EWdUKc9c81wFkksyG6P6Mg7IETfKCE1Xqng= -github.com/vbauerster/mpb/v7 v7.4.2/go.mod h1:UmOiIUI8aPqWXIps0ciik3RKMdzx7+ooQpq+fBcXwBA= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= +github.com/vbauerster/mpb/v7 v7.5.3 
h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w= +github.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9AhY/PnuOE= github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/vmware/govmomi v0.22.2/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= @@ -1630,15 +1365,9 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1659,10 +1388,8 @@ github.com/zmap/zcrypto v0.0.0-20200513165325-16679db567ff/go.mod h1:TxpejqcVKQj github.com/zmap/zcrypto v0.0.0-20200911161511-43ff0ea04f21/go.mod 
h1:TxpejqcVKQjQaVVmMGfzx5HnmFMdIU+vLtaCyPBfGI4= github.com/zmap/zlint/v2 v2.2.1/go.mod h1:ixPWsdq8qLxYRpNUTbcKig3R7WgmspsHGLhCCs6rFAM= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= @@ -1670,68 +1397,55 @@ go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lL go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.mongodb.org/mongo-driver v1.9.4 h1:qXWlnK2WCOWSxJ/Hm3XyYOGKv3ujA2btBsCyuIFvQjc= go.mongodb.org/mongo-driver v1.9.4/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/contrib/propagators v0.19.0 h1:HrixVNZYFjUl/Db+Tr3DhqzLsVW9GeVf/Gye+C5dNUY= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel v1.15.1 h1:3Iwq3lfRByPaws0f6bU3naAqOR1n5IeDWd9390kWHa8= go.opentelemetry.io/otel v1.15.1/go.mod h1:mHHGEHVDLal6YrKMmk9LqC4a3sF5g+fHfrttQIB1NTc= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/otel/trace v1.15.1 h1:uXLo6iHJEzDfrNC0L0mNjItIp06SyaBQxu5t3xMlngY= go.opentelemetry.io/otel/trace v1.15.1/go.mod h1:IWdQG/5N1x7f6YUlmdLeJvH9yxtuJAfc4VW5Agv9r/8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd h1:Uo/x0Ir5vQJ+683GXB9Ug+4fcjsbp7z7Ul8UaZbhsRM= go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1 
h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go4.org v0.0.0-20200104003542-c7e774b10ea0 h1:M6XsnQeLwG+rHQ+/rrGh3puBI3WZEy9TBWmf2H+enQA= golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1751,14 +1465,12 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= @@ -1773,15 +1485,12 @@ golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9t golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20210220032938-85be41e4509f/go.mod h1:I6l2HNBLBZEcrOoCpyKLdY2lHoRZ8lI4x60KMCQDft4= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -1798,10 +1507,7 @@ golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -1817,26 +1523,19 @@ golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190419010253-1f3472d942ba/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191021144547-ec77196f6094/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1853,7 +1552,6 @@ golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5o golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1861,7 +1559,6 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -1870,13 +1567,12 @@ golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 h1:+jnHzr9VPj32ykQVai5DNahi9+NSp7yYuCsl5eAQtL0= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= +golang.org/x/oauth2 v0.4.0/go.mod 
h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1899,24 +1595,18 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606122018-79a91cf218c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1929,18 +1619,15 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1952,8 +1639,6 @@ golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201116194326-cc9327a14d48/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1964,31 +1649,20 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210921065528-437939a70204/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1996,13 +1670,16 @@ golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= @@ -2015,9 +1692,7 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -2027,46 +1702,34 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
+golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE= +golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524210228-3d17549cdc6b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191004183538-27eeabb02079/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2075,57 +1738,25 @@ golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools 
v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= -golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= -golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.7-0.20210921203514-b98090b833e3/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= @@ -2148,26 +1779,19 @@ gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2177,8 +1801,6 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2188,15 +1810,13 @@ google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f h1:hJ/Y5SqPXbarffmAsApliUlcvMU+wScNGfyop4bZm8o= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc/cmd/protoc-gen-go-grpc v0.0.0-20200709232328-d8193ee9cc3e/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -2208,8 +1828,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= @@ -2243,12 +1861,12 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -2261,13 +1879,9 @@ gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= helm.sh/helm/v3 v3.6.2/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY= @@ -2288,7 +1902,6 @@ k8s.io/component-base v0.23.0 h1:UAnyzjvVZ2ZR1lF35YwtNY6VMN94WtOnArcXBu34es8= k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= k8s.io/component-helpers v0.23.0/go.mod h1:liXMh6FZS4qamKtMJQ7uLHnFe3tlC86RX5mJEk/aerg= k8s.io/controller-manager v0.23.0/go.mod h1:6/IKItSv6p9FY3mSbHgsOYmt4y+HDxiC5hEFg9rJVc8= -k8s.io/cri-api v0.23.0/go.mod h1:2edENu3/mkyW3c6fVPPPaVGEFbLRacJizBbSp7ZOLOo= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= @@ -2313,14 +1926,8 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20211002133954-f839ab2b2b11/go.mod h1:3RUAWoUC2YFIr0yZ91R4rLakSx2OhdZXUBSV4g4PucY= oras.land/oras-go v0.4.0/go.mod h1:VJcU+VE4rkclUbum5C0O7deEZbBYnsnpbGSACwTjOcg= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA= sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4= sigs.k8s.io/controller-tools v0.5.0 h1:3u2RCwOlp0cjCALAigpOcbAf50pE+kHSdueUosrC/AE= @@ -2340,7 +1947,6 @@ sigs.k8s.io/kustomize/kyaml v0.13.3 h1:tNNQIC+8cc+aXFTVg+RtQAOsjwUdYBZRAgYOVI3RB sigs.k8s.io/kustomize/kyaml v0.13.3/go.mod 
h1:/ya3Gk4diiQzlE4mBh7wykyLRFZNvqlbh+JnwQ9Vhrc= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes new file mode 100644 index 00000000000..94f480de94e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore index b883f1fdc6d..815e20660e5 100644 --- a/vendor/github.com/Microsoft/go-winio/.gitignore +++ b/vendor/github.com/Microsoft/go-winio/.gitignore @@ -1 +1,10 @@ +.vscode/ + *.exe + +# testing testdata + +# go workspaces go.work go.work.sum diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml new file mode 100644 index 00000000000..af403bb13a0 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -0,0 +1,144 @@ +run: + skip-dirs: + - pkg/etw/sample + +linters: + enable: + # style + - containedctx # struct contains a context + - dupl # duplicate code + - errname # errors are named correctly + - goconst # strings that should be constants + - godot # comments end in a period + - misspell + - nolintlint # "//nolint" directives are properly explained + - revive # golint replacement + - stylecheck # golint replacement, less configurable than revive + - unconvert # unnecessary conversions + - wastedassign + + # bugs, performance, unused, etc ...
+ - contextcheck # function uses a non-inherited context + - errorlint # errors not wrapped for 1.13 + - exhaustive # check exhaustiveness of enum switch statements + - gofmt # files are gofmt'ed + - gosec # security + - nestif # deeply nested ifs + - nilerr # returns nil even with non-nil error + - prealloc # slices that can be pre-allocated + - structcheck # unused struct fields + - unparam # unused function params + +issues: + exclude-rules: + # err is very often shadowed in nested scopes + - linters: + - govet + text: '^shadow: declaration of "err" shadows declaration' + + # ignore long lines for skip autogen directives + - linters: + - revive + text: "^line-length-limit: " + source: "^//(go:generate|sys) " + + # allow unjustified ignores of error checks in defer statements + - linters: + - nolintlint + text: "^directive `//nolint:errcheck` should provide explanation" + source: '^\s*defer ' + + # allow unjustified ignores of error lints for io.EOF + - linters: + - nolintlint + text: "^directive `//nolint:errorlint` should provide explanation" + source: '[=|!]= io.EOF' + + +linters-settings: + govet: + enable-all: true + disable: + # struct order is often for Win32 compat + # also, ignore pointer bytes/GC issues for now until performance becomes an issue + - fieldalignment + check-shadowing: true + nolintlint: + allow-leading-space: false + require-explanation: true + require-specific: true + revive: + # revive is more configurable than static check, so likely the preferred alternative to static-check + # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997) + enable-all-rules: + true + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md + rules: + # rules with required arguments + - name: argument-limit + disabled: true + - name: banned-characters + disabled: true + - name: cognitive-complexity + disabled: true + - name: cyclomatic + disabled: true + - name: file-header + disabled: true + - name: function-length + disabled: true + - name: function-result-limit + disabled: true + - name: max-public-structs + disabled: true + # generally annoying rules + - name: add-constant # complains about any and all strings and integers + disabled: true + - name: confusing-naming # we frequently use "Foo()" and "foo()" together + disabled: true + - name: flag-parameter # excessive, and a common idiom we use + disabled: true + # general config + - name: line-length-limit + arguments: + - 140 + - name: var-naming + arguments: + - [] + - - CID + - CRI + - CTRD + - DACL + - DLL + - DOS + - ETW + - FSCTL + - GCS + - GMSA + - HCS + - HV + - IO + - LCOW + - LDAP + - LPAC + - LTSC + - MMIO + - NT + - OCI + - PMEM + - PWSH + - RX + - SACl + - SID + - SMB + - TX + - VHD + - VHDX + - VMID + - VPCI + - WCOW + - WIM + stylecheck: + checks: + - "all" + - "-ST1003" # use revive's var naming diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md index 683be1dcf9c..7474b4f0b65 100644 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -13,16 +13,60 @@ Please see the LICENSE file for licensing information. ## Contributing -This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) -declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. +This project welcomes contributions and suggestions.
+Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that +you have the right to, and actually do, grant us the rights to use your contribution. +For details, visit [Microsoft CLA](https://cla.microsoft.com). -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR -appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. +When you submit a pull request, a CLA-bot will automatically determine whether you need to +provide a CLA and decorate the PR appropriately (e.g., label, comment). +Simply follow the instructions provided by the bot. +You will only need to do this once across all repos using our CLA. -We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves -or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can -attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. +Additionally, the pull request pipeline requires the following steps to be performed before +merging. +### Code Sign-Off + +We require that contributors sign their commits using [`git commit --signoff`][git-commit-s] +to certify they either authored the work themselves or otherwise have permission to use it in this project. + +A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s]. + +Please see [the developer certificate](https://developercertificate.org) for more info, +as well as to make sure that you can attest to the rules listed. +Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. + +### Linting + +Code must pass a linting stage, which uses [`golangci-lint`][lint]. +The linting settings are stored in [`.golangci.yml`](./.golangci.yml), and can be run +automatically with VSCode by adding the following to your workspace or folder settings: + +```json + "go.lintTool": "golangci-lint", + "go.lintOnSave": "package", +``` + +Additional editor [integration options are also available][lint-ide]. + +Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root: + +```shell +# use . or specify a path to only lint a package +# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0" +> golangci-lint run ./... +``` + +### Go Generate + +The pipeline checks that auto-generated code, via `go generate`, is up to date. + +This can be done for the entire repo: + +```shell +> go generate ./... +``` ## Code of Conduct @@ -30,8 +74,16 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. +## Special Thanks +Thanks to [natefinch][natefinch] for the inspiration for this library. +See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation. -## Special Thanks -Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe -for another named pipe implementation.
+[lint]: https://golangci-lint.run/ +[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration +[lint-install]: https://golangci-lint.run/usage/install/#local-installation + +[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s +[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff + +[natefinch]: https://github.com/natefinch diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md new file mode 100644 index 00000000000..869fdfe2b24 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
+ + diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go index 2be34af4310..09621c88463 100644 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -7,11 +8,12 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "runtime" "syscall" "unicode/utf16" + + "golang.org/x/sys/windows" ) //sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead @@ -24,7 +26,7 @@ const ( BackupAlternateData BackupLink BackupPropertyData - BackupObjectId + BackupObjectId //revive:disable-line:var-naming ID, not Id BackupReparseData BackupSparseBlock BackupTxfsData @@ -34,14 +36,16 @@ const ( StreamSparseAttributes = uint32(8) ) +//nolint:revive // var-naming: ALL_CAPS const ( - WRITE_DAC = 0x40000 - WRITE_OWNER = 0x80000 - ACCESS_SYSTEM_SECURITY = 0x1000000 + WRITE_DAC = windows.WRITE_DAC + WRITE_OWNER = windows.WRITE_OWNER + ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY ) // BackupHeader represents a backup stream of a file. type BackupHeader struct { + //revive:disable-next-line:var-naming ID, not Id Id uint32 // The backup stream ID Attributes uint32 // Stream attributes Size int64 // The size of the stream in bytes @@ -49,8 +53,8 @@ type BackupHeader struct { Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). } -type win32StreamId struct { - StreamId uint32 +type win32StreamID struct { + StreamID uint32 Attributes uint32 Size uint64 NameSize uint32 @@ -71,7 +75,7 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader { // Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if // it was not completely read. func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { + if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this if s, ok := r.r.(io.Seeker); ok { // Make sure Seek on io.SeekCurrent sometimes succeeds // before trying the actual seek. 
@@ -82,16 +86,16 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { r.bytesLeft = 0 } } - if _, err := io.Copy(ioutil.Discard, r); err != nil { + if _, err := io.Copy(io.Discard, r); err != nil { return nil, err } } - var wsi win32StreamId + var wsi win32StreamID if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { return nil, err } hdr := &BackupHeader{ - Id: wsi.StreamId, + Id: wsi.StreamID, Attributes: wsi.Attributes, Size: int64(wsi.Size), } @@ -102,7 +106,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { } hdr.Name = syscall.UTF16ToString(name) } - if wsi.StreamId == BackupSparseBlock { + if wsi.StreamID == BackupSparseBlock { if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { return nil, err } @@ -147,8 +151,8 @@ func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { return fmt.Errorf("missing %d bytes", w.bytesLeft) } name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamId{ - StreamId: hdr.Id, + wsi := win32StreamID{ + StreamID: hdr.Id, Attributes: hdr.Attributes, Size: uint64(hdr.Size), NameSize: uint32(len(name) * 2), @@ -203,7 +207,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { var bytesRead uint32 err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) if err != nil { - return 0, &os.PathError{"BackupRead", r.f.Name(), err} + return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} } runtime.KeepAlive(r.f) if bytesRead == 0 { @@ -216,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { // the underlying file. func (r *BackupFileReader) Close() error { if r.ctx != 0 { - backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) runtime.KeepAlive(r.f) r.ctx = 0 } @@ -242,7 +246,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { var bytesWritten uint32 err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) if err != nil { - return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} } runtime.KeepAlive(w.f) if int(bytesWritten) != len(b) { @@ -255,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { // close the underlying file. 
func (w *BackupFileWriter) Close() error { if w.ctx != 0 { - backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) runtime.KeepAlive(w.f) w.ctx = 0 } @@ -271,7 +275,13 @@ func OpenForBackup(path string, access uint32, share uint32, createmode uint32) if err != nil { return nil, err } - h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + h, err := syscall.CreateFile(&winPath[0], + access, + share, + nil, + createmode, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, + 0) if err != nil { err = &os.PathError{Op: "open", Path: path, Err: err} return nil, err diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/doc.go similarity index 81% rename from vendor/github.com/Microsoft/go-winio/backuptar/noop.go rename to vendor/github.com/Microsoft/go-winio/backuptar/doc.go index d39eccf0238..965d52ab04c 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/doc.go @@ -1,4 +1,3 @@ -// +build !windows // This file only exists to allow go get on non-Windows platforms. package backuptar diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go index 34160966399..455fd798eb9 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go @@ -1,3 +1,5 @@ +//go:build windows + package backuptar import ( diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go index 2342a7fcd6f..6b3b0cd5198 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package backuptar @@ -7,7 +8,6 @@ import ( "encoding/base64" "fmt" "io" - "io/ioutil" "path/filepath" "strconv" "strings" @@ -18,17 +18,18 @@ import ( "golang.org/x/sys/windows" ) +//nolint:deadcode,varcheck // keep unused constants for potential future use const ( - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket + cISUID = 0004000 // Set uid + cISGID = 0002000 // Set gid + cISVTX = 0001000 // Save text (sticky bit) + cISDIR = 0040000 // Directory + cISFIFO = 0010000 // FIFO + cISREG = 0100000 // Regular file + cISLNK = 0120000 // Symbolic link + cISBLK = 0060000 // Block special file + cISCHR = 0020000 // Character special file + cISSOCK = 0140000 // Socket ) const ( @@ -44,7 +45,7 @@ const ( // zeroReader is an io.Reader that always returns 0s. 
type zeroReader struct{} -func (zr zeroReader) Read(b []byte) (int, error) { +func (zeroReader) Read(b []byte) (int, error) { for i := range b { b[i] = 0 } @@ -55,7 +56,7 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { curOffset := int64(0) for { bhdr, err := br.Next() - if err == io.EOF { + if err == io.EOF { //nolint:errorlint err = io.ErrUnexpectedEOF } if err != nil { @@ -71,8 +72,8 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { } // archive/tar does not support writing sparse files // so just write zeroes to catch up to the current offset. - if _, err := io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil { - return fmt.Errorf("seek to offset %d: %s", bhdr.Offset, err) + if _, err = io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil { + return fmt.Errorf("seek to offset %d: %w", bhdr.Offset, err) } if bhdr.Size == 0 { // A sparse block with size = 0 is used to mark the end of the sparse blocks. @@ -106,7 +107,7 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds())) if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - hdr.Mode |= c_ISDIR + hdr.Mode |= cISDIR hdr.Size = 0 hdr.Typeflag = tar.TypeDir } @@ -116,32 +117,29 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta // SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file // from the tar header and returns the security descriptor into a byte slice. func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) { - // Maintaining old SDDL-based behavior for backward - // compatibility. All new tar headers written by this library - // will have raw binary for the security descriptor. - var sd []byte - var err error - if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { - sd, err = winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return nil, err - } - } if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { - sd, err = base64.StdEncoding.DecodeString(sdraw) + sd, err := base64.StdEncoding.DecodeString(sdraw) if err != nil { + // Not returning sd as-is in the error-case, as base64.DecodeString + // may return partially decoded data (not nil or empty slice) in case + // of a failure: https://github.com/golang/go/blob/go1.17.7/src/encoding/base64/base64.go#L382-L387 return nil, err } + return sd, nil } - return sd, nil + // Maintaining old SDDL-based behavior for backward compatibility. All new + // tar headers written by this library will have raw binary for the security + // descriptor. + if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { + return winio.SddlToSecurityDescriptor(sddl) + } + return nil, nil } // ExtendedAttributesFromTarHeader reads the EAs associated with the header of the // current file from the tar header and returns it as a byte slice. 
func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) { - var eas []winio.ExtendedAttribute - var eadata []byte - var err error + var eas []winio.ExtendedAttribute //nolint:prealloc // len(eas) <= len(hdr.PAXRecords); prealloc is wasteful for k, v := range hdr.PAXRecords { if !strings.HasPrefix(k, hdrEaPrefix) { continue @@ -155,13 +153,15 @@ func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) { Value: data, }) } + var eaData []byte + var err error if len(eas) != 0 { - eadata, err = winio.EncodeExtendedAttributes(eas) + eaData, err = winio.EncodeExtendedAttributes(eas) if err != nil { return nil, err } } - return eadata, nil + return eaData, nil } // EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header @@ -182,11 +182,9 @@ func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte { // // The additional Win32 metadata is: // -// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value -// -// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format -// -// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) +// - MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value +// - MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format +// - MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { name = filepath.ToSlash(name) hdr := BasicInfoHeader(name, size, fileInfo) @@ -209,7 +207,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size var dataHdr *winio.BackupHeader for dataHdr == nil { bhdr, err := br.Next() - if err == io.EOF { + if err == io.EOF { //nolint:errorlint break } if err != nil { @@ -217,21 +215,21 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size } switch bhdr.Id { case winio.BackupData: - hdr.Mode |= c_ISREG + hdr.Mode |= cISREG if !readTwice { dataHdr = bhdr } case winio.BackupSecurity: - sd, err := ioutil.ReadAll(br) + sd, err := io.ReadAll(br) if err != nil { return err } hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) case winio.BackupReparseData: - hdr.Mode |= c_ISLNK + hdr.Mode |= cISLNK hdr.Typeflag = tar.TypeSymlink - reparseBuffer, err := ioutil.ReadAll(br) + reparseBuffer, _ := io.ReadAll(br) rp, err := winio.DecodeReparsePoint(reparseBuffer) if err != nil { return err @@ -242,7 +240,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size hdr.Linkname = rp.Target case winio.BackupEaData: - eab, err := ioutil.ReadAll(br) + eab, err := io.ReadAll(br) if err != nil { return err } @@ -276,7 +274,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size } for dataHdr == nil { bhdr, err := br.Next() - if err == io.EOF { + if err == io.EOF { //nolint:errorlint break } if err != nil { @@ -311,7 +309,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size // range of the file containing the range contents. Finally there is a sparse block stream with // size = 0 and offset = <file size>.
if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 { @@ -319,13 +317,13 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size) } if _, err = io.Copy(t, br); err != nil { - return fmt.Errorf("%s: copying contents from data stream: %s", name, err) + return fmt.Errorf("%s: copying contents from data stream: %w", name, err) } } else if size > 0 { // As of a recent OS change, BackupRead now returns a data stream for empty sparse files. // These files have no sparse block streams, so skip the copySparse call if file size = 0. if err = copySparse(t, br); err != nil { - return fmt.Errorf("%s: copying contents from sparse block stream: %s", name, err) + return fmt.Errorf("%s: copying contents from sparse block stream: %w", name, err) } } } @@ -335,7 +333,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size // been written. In practice, this means that we don't get EA or TXF metadata. for { bhdr, err := br.Next() - if err == io.EOF { + if err == io.EOF { //nolint:errorlint break } if err != nil { @@ -343,35 +341,30 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size } switch bhdr.Id { case winio.BackupAlternateData: - altName := bhdr.Name - if strings.HasSuffix(altName, ":$DATA") { - altName = altName[:len(altName)-len(":$DATA")] - } - if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 { - hdr = &tar.Header{ - Format: hdr.Format, - Name: name + altName, - Mode: hdr.Mode, - Typeflag: tar.TypeReg, - Size: bhdr.Size, - ModTime: hdr.ModTime, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - err = t.WriteHeader(hdr) - if err != nil { - return err - } - _, err = io.Copy(t, br) - if err != nil { - return err - } - - } else { + if (bhdr.Attributes & winio.StreamSparseAttributes) != 0 { // Unsupported for now, since the size of the alternate stream is not present // in the backup stream until after the data has been read. return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name) } + altName := strings.TrimSuffix(bhdr.Name, ":$DATA") + hdr = &tar.Header{ + Format: hdr.Format, + Name: name + altName, + Mode: hdr.Mode, + Typeflag: tar.TypeReg, + Size: bhdr.Size, + ModTime: hdr.ModTime, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + err = t.WriteHeader(hdr) + if err != nil { + return err + } + _, err = io.Copy(t, br) + if err != nil { + return err + } case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: // ignore these streams default: @@ -413,7 +406,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win } fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano()) } - return + return name, size, fileInfo, err } // WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. 
Since this function may process multiple @@ -474,7 +467,6 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( if err != nil { return nil, err } - } if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go new file mode 100644 index 00000000000..1f5bfe2d548 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/doc.go @@ -0,0 +1,22 @@ +// This package provides utilities for efficiently performing Win32 IO operations in Go. +// Currently, this package provides support for general IO and management of +// - named pipes +// - files +// - [Hyper-V sockets] +// +// This code is similar to Go's [net] package, and uses IO completion ports to avoid +// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines. +// +// This limits support to Windows Vista and newer operating systems. +// +// Additionally, this package provides support for: +// - creating and managing GUIDs +// - writing to [ETW] +// - opening and managing VHDs +// - parsing [Windows Image files] +// - auto-generating Win32 API code +// +// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service +// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw- +// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images +package winio diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go index 4051c1b33bf..e104dbdfdf9 100644 --- a/vendor/github.com/Microsoft/go-winio/ea.go +++ b/vendor/github.com/Microsoft/go-winio/ea.go @@ -33,7 +33,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) if err != nil { err = errInvalidEaBuffer - return + return ea, nb, err } nameOffset := fileFullEaInformationSize @@ -43,7 +43,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { nextOffset := int(info.NextEntryOffset) if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { err = errInvalidEaBuffer - return + return ea, nb, err } ea.Name = string(b[nameOffset : nameOffset+nameLen]) @@ -52,7 +52,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { if info.NextEntryOffset != 0 { nb = b[info.NextEntryOffset:] } - return + return ea, nb, err } // DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION @@ -67,7 +67,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { eas = append(eas, ea) b = nb } - return + return eas, err } func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 293ab54c80c..175a99d3f42 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -11,6 +11,8 @@ import ( "sync/atomic" "syscall" "time" + + "golang.org/x/sys/windows" ) //sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx @@ -24,6 +26,8 @@ type atomicBool int32 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b),
1) } + +//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg func (b *atomicBool) swap(new bool) bool { var newInt int32 if new { @@ -32,11 +36,6 @@ func (b *atomicBool) swap(new bool) bool { return atomic.SwapInt32((*int32)(b), newInt) == 1 } -const ( - cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - var ( ErrFileClosed = errors.New("file has already been closed") ErrTimeout = &timeoutError{} @@ -44,28 +43,28 @@ var ( type timeoutError struct{} -func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } +func (*timeoutError) Error() string { return "i/o timeout" } +func (*timeoutError) Timeout() bool { return true } +func (*timeoutError) Temporary() bool { return true } type timeoutChan chan struct{} var ioInitOnce sync.Once var ioCompletionPort syscall.Handle -// ioResult contains the result of an asynchronous IO operation +// ioResult contains the result of an asynchronous IO operation. type ioResult struct { bytes uint32 err error } -// ioOperation represents an outstanding asynchronous Win32 IO +// ioOperation represents an outstanding asynchronous Win32 IO. type ioOperation struct { o syscall.Overlapped ch chan ioResult } -func initIo() { +func initIO() { h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) if err != nil { panic(err) @@ -94,15 +93,15 @@ type deadlineHandler struct { timedout atomicBool } -// makeWin32File makes a new win32File from an existing file handle +// makeWin32File makes a new win32File from an existing file handle. func makeWin32File(h syscall.Handle) (*win32File, error) { f := &win32File{handle: h} - ioInitOnce.Do(initIo) + ioInitOnce.Do(initIO) _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) if err != nil { return nil, err } - err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) if err != nil { return nil, err } @@ -121,14 +120,14 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { return f, nil } -// closeHandle closes the resources associated with a Win32 handle +// closeHandle closes the resources associated with a Win32 handle. func (f *win32File) closeHandle() { f.wgLock.Lock() // Atomically set that we are closing, releasing the resources only once. if !f.closing.swap(true) { f.wgLock.Unlock() // cancel all IO and wait for it to complete - cancelIoEx(f.handle, nil) + _ = cancelIoEx(f.handle, nil) f.wg.Wait() // at this point, no new IO can start syscall.Close(f.handle) @@ -144,14 +143,14 @@ func (f *win32File) Close() error { return nil } -// IsClosed checks if the file has been closed +// IsClosed checks if the file has been closed. func (f *win32File) IsClosed() bool { return f.closing.isSet() } -// prepareIo prepares for a new IO operation. +// prepareIO prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. 
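// An illustrative sketch of the intended calling pattern (it mirrors Read and Write
// below and is not itself part of this patch): pair prepareIO with wg.Done, issue the
// overlapped syscall, then let asyncIO block until completion:
//
//	c, err := f.prepareIO()
//	if err != nil {
//		return 0, err
//	}
//	defer f.wg.Done()
//	var bytes uint32
//	err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
//	n, err := f.asyncIO(c, &f.readDeadline, bytes, err)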
-func (f *win32File) prepareIo() (*ioOperation, error) { +func (f *win32File) prepareIO() (*ioOperation, error) { f.wgLock.RLock() if f.closing.isSet() { f.wgLock.RUnlock() @@ -164,7 +163,7 @@ func (f *win32File) prepareIo() (*ioOperation, error) { return c, nil } -// ioCompletionProcessor processes completed async IOs forever +// ioCompletionProcessor processes completed async IOs forever. func ioCompletionProcessor(h syscall.Handle) { for { var bytes uint32 @@ -178,15 +177,17 @@ func ioCompletionProcessor(h syscall.Handle) { } } -// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// todo: helsaawy - create an asyncIO version that takes a context + +// asyncIO processes the return value from ReadFile or WriteFile, blocking until // the operation has actually completed. -func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { +func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno return int(bytes), err } if f.closing.isSet() { - cancelIoEx(f.handle, &c.o) + _ = cancelIoEx(f.handle, &c.o) } var timeout timeoutChan @@ -200,7 +201,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er select { case r = <-c.ch: err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno if f.closing.isSet() { err = ErrFileClosed } @@ -210,10 +211,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) } case <-timeout: - cancelIoEx(f.handle, &c.o) + _ = cancelIoEx(f.handle, &c.o) r = <-c.ch err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno err = ErrTimeout } } @@ -221,13 +222,14 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er // runtime.KeepAlive is needed, as c is passed via native // code to ioCompletionProcessor, c must remain alive // until the channel read is complete. + // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? runtime.KeepAlive(c) return int(r.bytes), err } // Read reads from a file handle. func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIo() + c, err := f.prepareIO() if err != nil { return 0, err } @@ -239,13 +241,13 @@ func (f *win32File) Read(b []byte) (int, error) { var bytes uint32 err = syscall.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + n, err := f.asyncIO(c, &f.readDeadline, bytes, err) runtime.KeepAlive(b) // Handle EOF conditions. if err == nil && n == 0 && len(b) != 0 { return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { + } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno return 0, io.EOF } else { return n, err @@ -254,7 +256,7 @@ func (f *win32File) Read(b []byte) (int, error) { // Write writes to a file handle. 
func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIo() + c, err := f.prepareIO() if err != nil { return 0, err } @@ -266,7 +268,7 @@ func (f *win32File) Write(b []byte) (int, error) { var bytes uint32 err = syscall.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) runtime.KeepAlive(b) return n, err } diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go index 3ab6bff69c5..702950e72a4 100644 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -14,13 +15,18 @@ import ( type FileBasicInfo struct { CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime FileAttributes uint32 - pad uint32 // padding + _ uint32 // padding } // GetFileBasicInfo retrieves times and attributes for a file. func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { bi := &FileBasicInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.GetFileInformationByHandleEx( + windows.Handle(f.Fd()), + windows.FileBasicInfo, + (*byte)(unsafe.Pointer(bi)), + uint32(unsafe.Sizeof(*bi)), + ); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -29,7 +35,12 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.SetFileInformationByHandle( + windows.Handle(f.Fd()), + windows.FileBasicInfo, + (*byte)(unsafe.Pointer(bi)), + uint32(unsafe.Sizeof(*bi)), + ); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -48,7 +59,10 @@ type FileStandardInfo struct { // GetFileStandardInfo retrieves extended information for the file. func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { si := &FileStandardInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), + windows.FileStandardInfo, + (*byte)(unsafe.Pointer(si)), + uint32(unsafe.Sizeof(*si))); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -65,7 +79,12 @@ type FileIDInfo struct { // GetFileID retrieves the unique (volume, file ID) pair for a file.
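// An illustrative usage sketch (not part of this patch); the paths are hypothetical,
// and FileIDInfo is assumed to hold only comparable fields:
//
//	f1, err := os.Open(`C:\data\a.txt`)
//	...
//	f2, err := os.Open(`C:\data\hardlink-to-a.txt`)
//	...
//	id1, err := winio.GetFileID(f1)
//	...
//	id2, err := winio.GetFileID(f2)
//	...
//	sameFile := *id1 == *id2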
func GetFileID(f *os.File) (*FileIDInfo, error) { fileID := &FileIDInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + if err := windows.GetFileInformationByHandleEx( + windows.Handle(f.Fd()), + windows.FileIdInfo, + (*byte)(unsafe.Pointer(fileID)), + uint32(unsafe.Sizeof(*fileID)), + ); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index b2b644d002a..52f1c280f6a 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -4,6 +4,8 @@ package winio import ( + "context" + "errors" "fmt" "io" "net" @@ -12,16 +14,87 @@ import ( "time" "unsafe" + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/socket" "github.com/Microsoft/go-winio/pkg/guid" ) -//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind +const afHVSock = 34 // AF_HYPERV -const ( - afHvSock = 34 // AF_HYPERV +// Well known Service and VM IDs +//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards - socketError = ^uintptr(0) -) +// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. +func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 + return guid.GUID{} +} + +// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. +func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff + return guid.GUID{ + Data1: 0xffffffff, + Data2: 0xffff, + Data3: 0xffff, + Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + } +} + +// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector. +func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838 + return guid.GUID{ + Data1: 0xe0e16197, + Data2: 0xdd56, + Data3: 0x4a10, + Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38}, + } +} + +// HvsockGUIDSiloHost is the address of a silo's host partition: +// - The silo host of a hosted silo is the utility VM. +// - The silo host of a silo on a physical host is the physical host. +func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568 + return guid.GUID{ + Data1: 0x36bd0c5c, + Data2: 0x7276, + Data3: 0x4223, + Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68}, + } +} + +// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions. +func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd + return guid.GUID{ + Data1: 0x90db8b89, + Data2: 0xd35, + Data3: 0x4f79, + Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd}, + } +} + +// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition. +// Listening on this VmId accepts connection from: +// - Inside silos: silo host partition. +// - Inside hosted silo: host of the VM. +// - Inside VM: VM host. +// - Physical host: Not supported. 
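// For example (an illustrative sketch, not part of this patch), a guest could accept
// connections from its host by listening on:
//
//	addr := &HvsockAddr{
//		VMID:      HvsockGUIDParent(),
//		ServiceID: VsockServiceID(5000), // hypothetical port
//	}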
+func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878 + return guid.GUID{ + Data1: 0xa42e7cda, + Data2: 0xd03f, + Data3: 0x480c, + Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78}, + } +} + +// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol. +func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3 + return guid.GUID{ + Data2: 0xfacb, + Data3: 0x11e6, + Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3}, + } +} // An HvsockAddr is an address for an AF_HYPERV socket. type HvsockAddr struct { @@ -36,8 +109,10 @@ type rawHvsockAddr struct { ServiceID guid.GUID } +var _ socket.RawSockaddr = &rawHvsockAddr{} + // Network returns the address's network name, "hvsock". -func (addr *HvsockAddr) Network() string { +func (*HvsockAddr) Network() string { return "hvsock" } @@ -47,14 +122,14 @@ func (addr *HvsockAddr) String() string { // VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. func VsockServiceID(port uint32) guid.GUID { - g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g := hvsockVsockServiceTemplate() // make a copy g.Data1 = port return g } func (addr *HvsockAddr) raw() rawHvsockAddr { return rawHvsockAddr{ - Family: afHvSock, + Family: afHVSock, VMID: addr.VMID, ServiceID: addr.ServiceID, } @@ -65,20 +140,48 @@ func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { addr.ServiceID = raw.ServiceID } +// Sockaddr returns a pointer to and the size of this struct. +// +// Implements the [socket.RawSockaddr] interface, and allows use in +// [socket.Bind] and [socket.ConnectEx]. +func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) { + return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil +} + +// FromBytes decodes the rawHvsockAddr in b, validating its address family. +func (r *rawHvsockAddr) FromBytes(b []byte) error { + n := int(unsafe.Sizeof(rawHvsockAddr{})) + + if len(b) < n { + return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize) + } + + copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n]) + if r.Family != afHVSock { + return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily) + } + + return nil +} + // HvsockListener is a socket listener for the AF_HYPERV address family. type HvsockListener struct { sock *win32File addr HvsockAddr } +var _ net.Listener = &HvsockListener{} + // HvsockConn is a connected socket of the AF_HYPERV address family. type HvsockConn struct { sock *win32File local, remote HvsockAddr } -func newHvSocket() (*win32File, error) { - fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) +var _ net.Conn = &HvsockConn{} + +func newHVSocket() (*win32File, error) { + fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1) if err != nil { return nil, os.NewSyscallError("socket", err) } @@ -94,12 +197,12 @@ func newHvSocket() (*win32File, error) { // ListenHvsock listens for connections on the specified hvsock address.
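// An illustrative accept loop (not part of this patch); the VSOCK port is hypothetical:
//
//	l, err := winio.ListenHvsock(&winio.HvsockAddr{
//		VMID:      winio.HvsockGUIDWildcard(),
//		ServiceID: winio.VsockServiceID(5000),
//	})
//	if err != nil {
//		return err
//	}
//	defer l.Close()
//	for {
//		conn, err := l.Accept()
//		if err != nil {
//			return err
//		}
//		go handle(conn) // handle is a hypothetical per-connection handler
//	}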
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { l := &HvsockListener{addr: *addr} - sock, err := newHvSocket() + sock, err := newHVSocket() if err != nil { return nil, l.opErr("listen", err) } sa := addr.raw() - err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + err = socket.Bind(windows.Handle(sock.handle), &sa) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("socket", err)) } @@ -121,7 +224,7 @@ func (l *HvsockListener) Addr() net.Addr { // Accept waits for the next connection and returns it. func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHvSocket() + sock, err := newHVSocket() if err != nil { return nil, l.opErr("accept", err) } @@ -130,27 +233,42 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { sock.Close() } }() - c, err := l.sock.prepareIo() + c, err := l.sock.prepareIO() if err != nil { return nil, l.opErr("accept", err) } defer l.sock.wg.Done() // AcceptEx, per documentation, requires an extra 16 bytes per address. + // + // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) - _, err = l.sock.asyncIo(c, nil, bytes, err) - if err != nil { + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o) + if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } + conn := &HvsockConn{ sock: sock, } + // The local address returned in the AcceptEx buffer is the same as the Listener socket's + // address. However, the service GUID reported by GetSockName is different from the Listener + // socket's, and is sometimes the same as the local address of the socket that dialed the + // address, with the service GUID.Data1 incremented, but other times is different. + // todo: does the local address matter? is the listener's address or the actual address appropriate? conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + + // initialize the accepted socket and update its properties with those of the listening socket + if err = windows.Setsockopt(windows.Handle(sock.handle), + windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, + (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { + return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) + } + sock = nil return conn, nil } @@ -160,43 +278,171 @@ func (l *HvsockListener) Close() error { return l.sock.Close() } -/* Need to finish ConnectEx handling -func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { - sock, err := newHvSocket() +// HvsockDialer configures and dials a Hyper-V Socket (i.e., [HvsockConn]). +type HvsockDialer struct { + // Deadline is the time the Dial operation must connect before erroring. + Deadline time.Time + + // Retries is the number of additional connects to try if the connection times out, is refused, + // or the host is unreachable. + Retries uint + + // RetryWait is the time to wait after a connection error to retry. + RetryWait time.Duration + + rt *time.Timer // redial wait timer +} + +// Dial the Hyper-V socket at addr. +// +// See [HvsockDialer.Dial] for more information.
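// An illustrative sketch of dialing with retries (not part of this patch); vmID and
// the VSOCK port are hypothetical:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	d := &winio.HvsockDialer{Retries: 3, RetryWait: time.Second}
//	conn, err := d.Dial(ctx, &winio.HvsockAddr{
//		VMID:      vmID,
//		ServiceID: winio.VsockServiceID(5000),
//	})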
+func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + return (&HvsockDialer{}).Dial(ctx, addr) +} + +// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. +// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between +// retries. +// +// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. +func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + op := "dial" + // create the conn early to use opErr() + conn = &HvsockConn{ + remote: *addr, + } + + if !d.Deadline.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, d.Deadline) + defer cancel() + } + + // preemptive timeout/cancellation check + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) + } + + sock, err := newHVSocket() if err != nil { - return nil, err + return nil, conn.opErr(op, err) } defer func() { if sock != nil { sock.Close() } }() - c, err := sock.prepareIo() + + sa := addr.raw() + err = socket.Bind(windows.Handle(sock.handle), &sa) if err != nil { - return nil, err + return nil, conn.opErr(op, os.NewSyscallError("bind", err)) + } + + c, err := sock.prepareIO() + if err != nil { + return nil, conn.opErr(op, err) } defer sock.wg.Done() var bytes uint32 - err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) - _, err = sock.asyncIo(ctx, c, nil, bytes, err) + for i := uint(0); i <= d.Retries; i++ { + err = socket.ConnectEx( + windows.Handle(sock.handle), + &sa, + nil, // sendBuf + 0, // sendDataLen + &bytes, + (*windows.Overlapped)(unsafe.Pointer(&c.o))) + _, err = sock.asyncIO(c, nil, bytes, err) + if i < d.Retries && canRedial(err) { + if err = d.redialWait(ctx); err == nil { + continue + } + } + break + } if err != nil { - return nil, err + return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) } - conn := &HvsockConn{ - sock: sock, - remote: *addr, + + // update the connection properties, so shutdown can be used + if err = windows.Setsockopt( + windows.Handle(sock.handle), + windows.SOL_SOCKET, + windows.SO_UPDATE_CONNECT_CONTEXT, + nil, // optvalue + 0, // optlen + ); err != nil { + return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) + } + + // get the local name + var sal rawHvsockAddr + err = socket.GetSockName(windows.Handle(sock.handle), &sal) + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) + } + conn.local.fromRaw(&sal) + + // one last check for timeout, since asyncIO doesn't check the context + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) } + + conn.sock = sock sock = nil + return conn, nil } -*/ + +// redialWait waits before attempting to redial, resetting the timer as appropriate. +func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { + if d.RetryWait == 0 { + return nil + } + + if d.rt == nil { + d.rt = time.NewTimer(d.RetryWait) + } else { + // should already be stopped and drained + d.rt.Reset(d.RetryWait) + } + + select { + case <-ctx.Done(): + case <-d.rt.C: + return nil + } + + // stop and drain the timer + if !d.rt.Stop() { + <-d.rt.C + } + return ctx.Err() +} + +// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. 
+func canRedial(err error) bool { + //nolint:errorlint // guaranteed to be an Errno + switch err { + case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, + windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: + return true + default: + return false + } +} func (conn *HvsockConn) opErr(op string, err error) error { + // translate from "file closed" to "socket closed" + if errors.Is(err, ErrFileClosed) { + err = socket.ErrSocketClosed + } return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} } func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIo() + c, err := conn.sock.prepareIO() if err != nil { return 0, conn.opErr("read", err) } @@ -204,10 +450,11 @@ func (conn *HvsockConn) Read(b []byte) (int, error) { buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} var flags, bytes uint32 err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsarecv", err) + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsarecv", eno) } return 0, conn.opErr("read", err) } else if n == 0 { @@ -230,7 +477,7 @@ func (conn *HvsockConn) Write(b []byte) (int, error) { } func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIo() + c, err := conn.sock.prepareIO() if err != nil { return 0, conn.opErr("write", err) } @@ -238,10 +485,11 @@ func (conn *HvsockConn) write(b []byte) (int, error) { buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} var bytes uint32 err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsasend", err) + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsasend", eno) } return 0, conn.opErr("write", err) } @@ -257,13 +505,19 @@ func (conn *HvsockConn) IsClosed() bool { return conn.sock.IsClosed() } +// shutdown disables sending or receiving on a socket. func (conn *HvsockConn) shutdown(how int) error { if conn.IsClosed() { - return ErrFileClosed + return socket.ErrSocketClosed } err := syscall.Shutdown(conn.sock.handle, how) if err != nil { + // If the connection was closed, shutdowns fail with "not connected" + if errors.Is(err, windows.WSAENOTCONN) || + errors.Is(err, windows.WSAESHUTDOWN) { + err = socket.ErrSocketClosed + } return os.NewSyscallError("shutdown", err) } return nil @@ -273,7 +527,7 @@ func (conn *HvsockConn) shutdown(how int) error { func (conn *HvsockConn) CloseRead() error { err := conn.shutdown(syscall.SHUT_RD) if err != nil { - return conn.opErr("close", err) + return conn.opErr("closeread", err) } return nil } @@ -283,7 +537,7 @@ func (conn *HvsockConn) CloseRead() error { func (conn *HvsockConn) CloseWrite() error { err := conn.shutdown(syscall.SHUT_WR) if err != nil { - return conn.opErr("close", err) + return conn.opErr("closewrite", err) } return nil } @@ -300,8 +554,13 @@ func (conn *HvsockConn) RemoteAddr() net.Addr { // SetDeadline implements the net.Conn SetDeadline method. 
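// An illustrative sketch (not part of this patch): bound a request/response exchange
// on the connection before reading:
//
//	if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
//		return err
//	}
//	n, err := conn.Read(buf)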
func (conn *HvsockConn) SetDeadline(t time.Time) error { - conn.SetReadDeadline(t) - conn.SetWriteDeadline(t) + // todo: implement `SetDeadline` for `win32File` + if err := conn.SetReadDeadline(t); err != nil { + return fmt.Errorf("set read deadline: %w", err) + } + if err := conn.SetWriteDeadline(t); err != nil { + return fmt.Errorf("set write deadline: %w", err) + } return nil } diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go new file mode 100644 index 00000000000..7e82f9afa95 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go @@ -0,0 +1,20 @@ +package socket + +import ( + "unsafe" +) + +// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The +// struct must meet the Win32 sockaddr requirements specified here: +// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 +// +// Specifically, the struct size must be at least the size of an int16 (unsigned short) +// for the address family. +type RawSockaddr interface { + // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing + // for the RawSockaddr's data to be overwritten by syscalls (if necessary). + // + // It is the caller's responsibility to validate that the values are valid; invalid + // pointers or size can cause a panic. + Sockaddr() (unsafe.Pointer, int32, error) +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go new file mode 100644 index 00000000000..39e8c05f8f3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -0,0 +1,179 @@ +//go:build windows + +package socket + +import ( + "errors" + "fmt" + "net" + "sync" + "syscall" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go + +//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname +//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername +//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const socketError = uintptr(^uint32(0)) + +var ( + // todo(helsaawy): create custom error types to store the desired vs actual size and addr family? + + ErrBufferSize = errors.New("buffer size") + ErrAddrFamily = errors.New("address family") + ErrInvalidPointer = errors.New("invalid pointer") + ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed) +) + +// todo(helsaawy): replace these with generics, i.e., GetSockName[S RawSockaddr](s windows.Handle) (S, error) + +// GetSockName writes the local address of socket s to the [RawSockaddr] rsa. +// If rsa is not large enough, [windows.WSAEFAULT] is returned. +func GetSockName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + // although getsockname returns WSAEFAULT if the buffer is too small, it does not set + // &l to the correct size, so, apart from doubling the buffer repeatedly, there is no remedy + return getsockname(s, ptr, &l) } + +// GetPeerName returns the remote address the socket is connected to.
+// +// See [GetSockName] for more information. +func GetPeerName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return getpeername(s, ptr, &l) +} + +func Bind(s windows.Handle, rsa RawSockaddr) (err error) { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return bind(s, ptr, l) +} + +// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of +// their sockaddr interface, so they cannot be used with HvsockAddr. +// Replicate functionality here from +// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go + +// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at +// runtime via a WSAIoctl call: +// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks + +type runtimeFunc struct { + id guid.GUID + once sync.Once + addr uintptr + err error +} + +func (f *runtimeFunc) Load() error { + f.once.Do(func() { + var s windows.Handle + s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP) + if f.err != nil { + return + } + defer windows.CloseHandle(s) //nolint:errcheck + + var n uint32 + f.err = windows.WSAIoctl(s, + windows.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&f.id)), + uint32(unsafe.Sizeof(f.id)), + (*byte)(unsafe.Pointer(&f.addr)), + uint32(unsafe.Sizeof(f.addr)), + &n, + nil, // overlapped + 0, // completionRoutine + ) + }) + return f.err +} + +var ( + // todo: add `AcceptEx` and `GetAcceptExSockaddrs` + WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS + Data1: 0x25a207b9, + Data2: 0xddf3, + Data3: 0x4660, + Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, + } + + connectExFunc = runtimeFunc{id: WSAID_CONNECTEX} +) + +func ConnectEx( + fd windows.Handle, + rsa RawSockaddr, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) error { + if err := connectExFunc.Load(); err != nil { + return fmt.Errorf("failed to load ConnectEx function pointer: %w", err) + } + ptr, n, err := rsa.Sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +// BOOL LpfnConnectex( +// [in] SOCKET s, +// [in] const sockaddr *name, +// [in] int namelen, +// [in, optional] PVOID lpSendBuffer, +// [in] DWORD dwSendDataLength, +// [out] LPDWORD lpdwBytesSent, +// [in] LPOVERLAPPED lpOverlapped +// ) + +func connectEx( + s windows.Handle, + name unsafe.Pointer, + namelen int32, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) (err error) { + // todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, + 7, + uintptr(s), + uintptr(name), + uintptr(namelen), + uintptr(unsafe.Pointer(sendBuf)), + uintptr(sendDataLen), + uintptr(unsafe.Pointer(bytesSent)), + uintptr(unsafe.Pointer(overlapped)), + 0, + 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go new file mode 100644 index 00000000000..6d2e1a9e443 --- /dev/null +++ 
b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -0,0 +1,72 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package socket + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procbind = modws2_32.NewProc("bind") + procgetpeername = modws2_32.NewProc("getpeername") + procgetsockname = modws2_32.NewProc("getsockname") +) + +func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index 96700a73de2..ca6e38fc000 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -13,6 +14,8 @@ import ( "syscall" "time" "unsafe" + + "golang.org/x/sys/windows" ) //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe @@ -21,10 +24,10 @@ import ( //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys rtlDefaultNpAcl(dacl *uintptr) 
(status ntstatus) = ntdll.RtlDefaultNpAcl +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl type ioStatusBlock struct { Status, Information uintptr @@ -51,45 +54,22 @@ type securityDescriptor struct { Control uint16 Owner uintptr Group uintptr - Sacl uintptr - Dacl uintptr + Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl + Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl } -type ntstatus int32 +type ntStatus int32 -func (status ntstatus) Err() error { +func (status ntStatus) Err() error { if status >= 0 { return nil } return rtlNtStatusToDosError(status) } -const ( - cERROR_PIPE_BUSY = syscall.Errno(231) - cERROR_NO_DATA = syscall.Errno(232) - cERROR_PIPE_CONNECTED = syscall.Errno(535) - cERROR_SEM_TIMEOUT = syscall.Errno(121) - - cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 - - cPIPE_TYPE_MESSAGE = 4 - - cPIPE_READMODE_MESSAGE = 2 - - cFILE_OPEN = 1 - cFILE_CREATE = 2 - - cFILE_PIPE_MESSAGE_TYPE = 1 - cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 - - cSE_DACL_PRESENT = 4 -) - var ( // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - // This error should match net.errClosing since docker takes a dependency on its text. - ErrPipeListenerClosed = errors.New("use of closed network connection") + ErrPipeListenerClosed = net.ErrClosed errPipeWriteClosed = errors.New("pipe has been closed for write") ) @@ -116,9 +96,10 @@ func (f *win32Pipe) RemoteAddr() net.Addr { } func (f *win32Pipe) SetDeadline(t time.Time) error { - f.SetReadDeadline(t) - f.SetWriteDeadline(t) - return nil + if err := f.SetReadDeadline(t); err != nil { + return err + } + return f.SetWriteDeadline(t) } // CloseWrite closes the write side of a message pipe in byte mode. @@ -157,14 +138,14 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { return 0, io.EOF } n, err := f.win32File.Read(b) - if err == io.EOF { + if err == io.EOF { //nolint:errorlint // If this was the result of a zero-byte read, then // it is possible that the read was due to a zero-size // message. Since we are simulating CloseWrite with a // zero-byte message, ensure that all future Read() calls // also return EOF. f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { + } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno // ERROR_MORE_DATA indicates that the pipe's read mode is message mode // and the message still has more bytes. Treat this as a success, since // this package presents all named pipes as byte streams. @@ -173,7 +154,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { return n, err } -func (s pipeAddress) Network() string { +func (pipeAddress) Network() string { return "pipe" } @@ -184,16 +165,21 @@ func (s pipeAddress) String() string { // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. 
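// Callers normally reach this retry loop through the exported helpers; an illustrative
// sketch (not part of this patch), with a hypothetical pipe name:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
//	defer cancel()
//	conn, err := winio.DialPipeContext(ctx, `\\.\pipe\example`)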
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { for { - select { case <-ctx.Done(): return syscall.Handle(0), ctx.Err() default: - h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + h, err := createFile(*path, + access, + 0, + nil, + syscall.OPEN_EXISTING, + windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS, + 0) if err == nil { return h, nil } - if err != cERROR_PIPE_BUSY { + if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno return h, &os.PathError{Err: err, Op: "open", Path: *path} } // Wait 10 msec and try again. This is a rather simplistic @@ -213,9 +199,10 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { } else { absTimeout = time.Now().Add(2 * time.Second) } - ctx, _ := context.WithDeadline(context.Background(), absTimeout) + ctx, cancel := context.WithDeadline(context.Background(), absTimeout) + defer cancel() conn, err := DialPipeContext(ctx, path) - if err == context.DeadlineExceeded { + if errors.Is(err, context.DeadlineExceeded) { return nil, ErrTimeout } return conn, err @@ -251,7 +238,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, // If the pipe is in message mode, return a message byte pipe, which // supports CloseWrite(). - if flags&cPIPE_TYPE_MESSAGE != 0 { + if flags&windows.PIPE_TYPE_MESSAGE != 0 { return &win32MessageBytePipe{ win32Pipe: win32Pipe{win32File: f, path: path}, }, nil @@ -283,7 +270,11 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy oa.Length = unsafe.Sizeof(oa) var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + if err := rtlDosPathNameToNtPathName(&path16[0], + &ntPath, + 0, + 0, + ).Err(); err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } defer localFree(ntPath.Buffer) @@ -292,8 +283,8 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy // The security descriptor is only needed for the first pipe. if first { if sd != nil { - len := uint32(len(sd)) - sdb := localAlloc(0, len) + l := uint32(len(sd)) + sdb := localAlloc(0, l) defer localFree(sdb) copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) @@ -301,28 +292,28 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy // Construct the default named pipe security descriptor. 
var dacl uintptr if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { - return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + return 0, fmt.Errorf("getting default named pipe ACL: %w", err) } defer localFree(dacl) sdb := &securityDescriptor{ Revision: 1, - Control: cSE_DACL_PRESENT, + Control: windows.SE_DACL_PRESENT, Dacl: dacl, } oa.SecurityDescriptor = sdb } } - typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) + typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) if c.MessageMode { - typ |= cFILE_PIPE_MESSAGE_TYPE + typ |= windows.FILE_PIPE_MESSAGE_TYPE } - disposition := uint32(cFILE_OPEN) + disposition := uint32(windows.FILE_OPEN) access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) if first { - disposition = cFILE_CREATE + disposition = windows.FILE_CREATE // By not asking for read or write access, the named pipe file system // will put this pipe into an initially disconnected state, blocking // client connections until the next call with first == false. @@ -335,7 +326,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy h syscall.Handle iosb ioStatusBlock ) - err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() + err = ntCreateNamedPipeFile(&h, + access, + &oa, + &iosb, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, + disposition, + 0, + typ, + 0, + 0, + 0xffffffff, + uint32(c.InputBufferSize), + uint32(c.OutputBufferSize), + &timeout).Err() if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } @@ -380,7 +384,7 @@ func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { p.Close() p = nil err = <-ch - if err == nil || err == ErrFileClosed { + if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno err = ErrPipeListenerClosed } } @@ -402,12 +406,12 @@ func (l *win32PipeListener) listenerRoutine() { p, err = l.makeConnectedServerPipe() // If the connection was immediately closed by the client, try // again. - if err != cERROR_NO_DATA { + if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno break } } responseCh <- acceptResponse{p, err} - closed = err == ErrPipeListenerClosed + closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno } } syscall.Close(l.firstHandle) @@ -469,15 +473,15 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { } func connectPipe(p *win32File) error { - c, err := p.prepareIo() + c, err := p.prepareIO() if err != nil { return err } defer p.wg.Done() err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIo(c, nil, 0, err) - if err != nil && err != cERROR_PIPE_CONNECTED { + _, err = p.asyncIO(c, nil, 0, err) + if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno return err } return nil diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index 2d9161e2dee..48ce4e92436 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -1,5 +1,3 @@ -// +build windows - // Package guid provides a GUID type. The backing structure for a GUID is // identical to that used by the golang.org/x/sys/windows GUID type. 
// There are two main binary encodings used for a GUID, the big-endian encoding, @@ -9,24 +7,26 @@ package guid import ( "crypto/rand" - "crypto/sha1" + "crypto/sha1" //nolint:gosec // not used for secure application "encoding" "encoding/binary" "fmt" "strconv" ) +//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment + // Variant specifies which GUID variant (or "type") of the GUID. It determines // how the entirety of the rest of the GUID is interpreted. type Variant uint8 -// The variants specified by RFC 4122. +// The variants specified by RFC 4122 section 4.1.1. const ( // VariantUnknown specifies a GUID variant which does not conform to one of // the variant encodings specified in RFC 4122. VariantUnknown Variant = iota VariantNCS - VariantRFC4122 + VariantRFC4122 // RFC 4122 VariantMicrosoft VariantFuture ) @@ -36,6 +36,10 @@ const ( // hash of an input string. type Version uint8 +func (v Version) String() string { + return strconv.FormatUint(uint64(v), 10) +} + var _ = (encoding.TextMarshaler)(GUID{}) var _ = (encoding.TextUnmarshaler)(&GUID{}) @@ -61,7 +65,7 @@ func NewV4() (GUID, error) { // big-endian UTF16 stream of bytes. If that is desired, the string can be // encoded as such before being passed to this function. func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() + b := sha1.New() //nolint:gosec // not used for secure application namespaceBytes := namespace.ToArray() b.Write(namespaceBytes[:]) b.Write(name) diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go index f64d828c0ba..805bd354842 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package guid diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go index 83617f4eee9..27e45ee5ccf 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -1,3 +1,6 @@ +//go:build windows +// +build windows + package guid import "golang.org/x/sys/windows" diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go new file mode 100644 index 00000000000..4076d3132fd --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT. + +package guid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[VariantUnknown-0] + _ = x[VariantNCS-1] + _ = x[VariantRFC4122-2] + _ = x[VariantMicrosoft-3] + _ = x[VariantFuture-4] +} + +const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture" + +var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33} + +func (i Variant) String() string { + if i >= Variant(len(_Variant_index)-1) { + return "Variant(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Variant_name[_Variant_index[i]:_Variant_index[i+1]] +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go index 602920786c9..6df87b74990 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package security @@ -20,6 +21,7 @@ type ( trusteeForm uint32 trusteeType uint32 + //nolint:structcheck // structcheck thinks fields are unused, but they are used to pass data to the OS explicitAccess struct { accessPermissions accessMask accessMode accessMode @@ -27,6 +29,7 @@ type ( trustee trustee } + //nolint:structcheck,unused // structcheck thinks fields are unused, but they are used to pass data to the OS trustee struct { multipleTrustee *trustee multipleTrusteeOperation int32 @@ -44,6 +47,7 @@ const ( desiredAccessReadControl desiredAccess = 0x20000 desiredAccessWriteDac desiredAccess = 0x40000 + //cspell:disable-next-line gvmga = "GrantVmGroupAccess:" inheritModeNoInheritance inheritMode = 0x0 @@ -56,9 +60,9 @@ const ( shareModeRead shareMode = 0x1 shareModeWrite shareMode = 0x2 - sidVmGroup = "S-1-5-83-0" + sidVMGroup = "S-1-5-83-0" - trusteeFormIsSid trusteeForm = 0 + trusteeFormIsSID trusteeForm = 0 trusteeTypeWellKnownGroup trusteeType = 5 ) @@ -67,6 +71,8 @@ const ( // include Grant ACE entries for the VM Group SID. This is a golang re- // implementation of the same function in vmcompute, just not exported in // RS5. Which kind of sucks. Sucks a lot :/ +// +//revive:disable-next-line:var-naming VM, not Vm func GrantVmGroupAccess(name string) error { // Stat (to determine if `name` is a directory). s, err := os.Stat(name) @@ -79,7 +85,7 @@ func GrantVmGroupAccess(name string) error { if err != nil { return err // Already wrapped } - defer syscall.CloseHandle(fd) + defer syscall.CloseHandle(fd) //nolint:errcheck // Get the current DACL and Security Descriptor. Must defer LocalFree on success. ot := objectTypeFileObject @@ -89,7 +95,7 @@ func GrantVmGroupAccess(name string) error { if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err) } - defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) //nolint:errcheck // Generate a new DACL which is the current DACL with the required ACEs added. // Must defer LocalFree on success. @@ -97,7 +103,7 @@ func GrantVmGroupAccess(name string) error { if err != nil { return err // Already wrapped } - defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) //nolint:errcheck // And finally use SetSecurityInfo to apply the updated DACL. 
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { @@ -110,16 +116,19 @@ func GrantVmGroupAccess(name string) error { // createFile is a helper function to call [Nt]CreateFile to get a handle to // the file or directory. func createFile(name string, isDir bool) (syscall.Handle, error) { - namep := syscall.StringToUTF16(name) + namep, err := syscall.UTF16FromString(name) + if err != nil { + return syscall.InvalidHandle, fmt.Errorf("could not convert name to UTF-16: %w", err) + } da := uint32(desiredAccessReadControl | desiredAccessWriteDac) sm := uint32(shareModeRead | shareModeWrite) fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) if isDir { - fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS) + fa |= syscall.FILE_FLAG_BACKUP_SEMANTICS } fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) if err != nil { - return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err) + return syscall.InvalidHandle, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err) } return fd, nil } @@ -128,9 +137,9 @@ func createFile(name string, isDir bool) (syscall.Handle, error) { // The caller is responsible for LocalFree of the returned DACL on success. func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) { // Generate pointers to the SIDs based on the string SIDs - sid, err := syscall.StringToSid(sidVmGroup) + sid, err := syscall.StringToSid(sidVMGroup) if err != nil { - return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err) + return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVMGroup, err) } inheritance := inheritModeNoInheritance @@ -139,12 +148,12 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp } eaArray := []explicitAccess{ - explicitAccess{ + { accessPermissions: accessMaskDesiredPermission, accessMode: accessModeGrant, inheritance: inheritance, trustee: trustee{ - trusteeForm: trusteeFormIsSid, + trusteeForm: trusteeFormIsSID, trusteeType: trusteeTypeWellKnownGroup, name: uintptr(unsafe.Pointer(sid)), }, diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go index d7096716ce2..71326e4e46f 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go @@ -1,6 +1,6 @@ package security -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall_windows.go //sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo //sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go index 4084680e0f0..26c986b88fe 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; 
DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package security diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go index c3dd7c21769..0ff9dac906d 100644 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -24,22 +25,17 @@ import ( //sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW const ( - SE_PRIVILEGE_ENABLED = 2 + //revive:disable-next-line:var-naming ALL_CAPS + SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + //revive:disable-next-line:var-naming ALL_CAPS + ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED SeBackupPrivilege = "SeBackupPrivilege" SeRestorePrivilege = "SeRestorePrivilege" SeSecurityPrivilege = "SeSecurityPrivilege" ) -const ( - securityAnonymous = iota - securityIdentification - securityImpersonation - securityDelegation -) - var ( privNames = make(map[string]uint64) privNameMutex sync.Mutex @@ -51,11 +47,9 @@ type PrivilegeError struct { } func (e *PrivilegeError) Error() string { - s := "" + s := "Could not enable privilege " if len(e.privileges) > 1 { s = "Could not enable privileges " - } else { - s = "Could not enable privilege " } for i, p := range e.privileges { if i != 0 { @@ -94,7 +88,7 @@ func RunWithPrivileges(names []string, fn func() error) error { } func mapPrivileges(names []string) ([]uint64, error) { - var privileges []uint64 + privileges := make([]uint64, 0, len(names)) privNameMutex.Lock() defer privNameMutex.Unlock() for _, name := range names { @@ -127,7 +121,7 @@ func enableDisableProcessPrivilege(names []string, action uint32) error { return err } - p, _ := windows.GetCurrentProcess() + p := windows.CurrentProcess() var token windows.Token err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) if err != nil { @@ -140,10 +134,10 @@ func enableDisableProcessPrivilege(names []string, action uint32) error { func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) for _, p := range privileges { - binary.Write(&b, binary.LittleEndian, p) - binary.Write(&b, binary.LittleEndian, action) + _ = binary.Write(&b, binary.LittleEndian, p) + _ = binary.Write(&b, binary.LittleEndian, action) } prevState := make([]byte, b.Len()) reqSize := uint32(0) @@ -151,7 +145,7 @@ func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) e if !success { return err } - if err == ERROR_NOT_ALL_ASSIGNED { + if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno return &PrivilegeError{privileges} } return nil @@ -177,7 +171,7 @@ func getPrivilegeName(luid uint64) string { } func newThreadToken() (windows.Token, error) { - err := impersonateSelf(securityImpersonation) + err := impersonateSelf(windows.SecurityImpersonation) if err != nil { return 0, err } diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go index fc1ee4d3a3e..67d1a104a63 100644 --- a/vendor/github.com/Microsoft/go-winio/reparse.go +++ 
b/vendor/github.com/Microsoft/go-winio/reparse.go @@ -1,3 +1,6 @@ +//go:build windows +// +build windows + package winio import ( @@ -113,16 +116,16 @@ func EncodeReparsePoint(rp *ReparsePoint) []byte { } var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, &data) + _ = binary.Write(&b, binary.LittleEndian, &data) if !rp.IsMountPoint { flags := uint32(0) if relative { flags |= 1 } - binary.Write(&b, binary.LittleEndian, flags) + _ = binary.Write(&b, binary.LittleEndian, flags) } - binary.Write(&b, binary.LittleEndian, ntTarget16) - binary.Write(&b, binary.LittleEndian, target16) + _ = binary.Write(&b, binary.LittleEndian, ntTarget16) + _ = binary.Write(&b, binary.LittleEndian, target16) return b.Bytes() } diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go index db1b370a1b5..5550ef6b61e 100644 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -1,23 +1,25 @@ +//go:build windows // +build windows package winio import ( + "errors" "syscall" "unsafe" + + "golang.org/x/sys/windows" ) //sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW //sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW //sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW //sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW //sys localFree(mem uintptr) = LocalFree //sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength -const ( - cERROR_NONE_MAPPED = syscall.Errno(1332) -) - type AccountLookupError struct { Name string Err error @@ -28,8 +30,10 @@ func (e *AccountLookupError) Error() string { return "lookup account: empty account name specified" } var s string - switch e.Err { - case cERROR_NONE_MAPPED: + switch { + case errors.Is(e.Err, windows.ERROR_INVALID_SID): + s = "the security ID structure is invalid" + case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): s = "not found" default: s = e.Err.Error() @@ -37,6 +41,8 @@ func (e *AccountLookupError) Error() string { return "lookup account " + e.Name + ": " + s } +func (e *AccountLookupError) Unwrap() error { return e.Err } + type SddlConversionError struct { Sddl string Err error @@ -46,15 +52,19 @@ func (e *SddlConversionError) Error() string { return "convert " + e.Sddl + ": " + e.Err.Error() } +func (e *SddlConversionError) Unwrap() error { return e.Err } + // LookupSidByName looks up the SID of an account by name +// +//revive:disable-next-line:var-naming SID, not Sid func LookupSidByName(name string) (sid string, err error) { if name == "" { - return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} } var sidSize, sidNameUse, refDomainSize uint32 err = lookupAccountName(nil, name, nil, &sidSize, nil, 
&refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno return "", &AccountLookupError{name, err} } sidBuffer := make([]byte, sidSize) @@ -73,6 +83,42 @@ func LookupSidByName(name string) (sid string, err error) { return sid, nil } +// LookupNameBySid looks up the name of an account by SID +// +//revive:disable-next-line:var-naming SID, not Sid +func LookupNameBySid(sid string) (name string, err error) { + if sid == "" { + return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} + } + + sidBuffer, err := windows.UTF16PtrFromString(sid) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + var sidPtr *byte + if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { + return "", &AccountLookupError{sid, err} + } + defer localFree(uintptr(unsafe.Pointer(sidPtr))) + + var nameSize, refDomainSize, sidNameUse uint32 + err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + return "", &AccountLookupError{sid, err} + } + + nameBuffer := make([]uint16, nameSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + name = windows.UTF16ToString(nameBuffer) + return name, nil +} + func SddlToSecurityDescriptor(sddl string) ([]byte, error) { var sdBuffer uintptr err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) @@ -87,7 +133,7 @@ func SddlToSecurityDescriptor(sddl string) ([]byte, error) { func SecurityDescriptorToSddl(sd []byte) (string, error) { var sddl *uint16 - // The returned string length seems to including an aribtrary number of terminating NULs. + // The returned string length seems to include an arbitrary number of terminating NULs. // Don't use it. 
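The `LookupNameBySid` helper added above follows the classic Win32 double-call protocol: call once with nil buffers so the API reports the required lengths, then call again with buffers of exactly that size. A runnable sketch of the same protocol against a stub; `fakeLookupAccount` is hypothetical and merely stands in for a call like `LookupAccountSidW`:

```go
package main

import (
	"errors"
	"fmt"
)

var errInsufficientBuffer = errors.New("insufficient buffer")

// fakeLookupAccount imitates a Windows lookup API: with a nil or too-small
// buffer it writes the required length to *size and fails; with a large
// enough buffer it copies the UTF-16 result in.
func fakeLookupAccount(buf []uint16, size *uint32) error {
	const name = "CONTOSO\\Alice"
	if buf == nil || len(buf) < len(name) {
		*size = uint32(len(name))
		return errInsufficientBuffer
	}
	for i := 0; i < len(name); i++ {
		buf[i] = uint16(name[i])
	}
	return nil
}

func main() {
	// First call: query the required buffer size.
	var size uint32
	if err := fakeLookupAccount(nil, &size); err != nil && !errors.Is(err, errInsufficientBuffer) {
		panic(err)
	}
	// Second call: allocate exactly that much and fetch the data.
	buf := make([]uint16, size)
	if err := fakeLookupAccount(buf, &size); err != nil {
		panic(err)
	}
	out := make([]rune, size)
	for i, c := range buf {
		out[i] = rune(c)
	}
	fmt.Println(string(out)) // CONTOSO\Alice
}
```

The real code treats only ERROR_INSUFFICIENT_BUFFER as the expected first-call outcome, which is why the `//nolint:errorlint` comparison against the raw Errno appears in the diff.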
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) if err != nil { diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go index 5955c99fdea..a6ca111b39c 100644 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -1,3 +1,5 @@ +//go:build windows + package winio -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/vendor/github.com/Microsoft/go-winio/tools.go b/vendor/github.com/Microsoft/go-winio/tools.go new file mode 100644 index 00000000000..2aa045843ea --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package winio + +import _ "golang.org/x/tools/cmd/stringer" diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go index f7f78fc2304..b54cad11270 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -11,7 +11,7 @@ import ( "golang.org/x/sys/windows" ) -//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zvhd_windows.go vhd.go //sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk //sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk @@ -62,8 +62,8 @@ type OpenVirtualDiskParameters struct { Version2 OpenVersion2 } -// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, -// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating +// The higher level `OpenVersion2` struct uses `bool`s to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, +// the internal windows structure uses `BOOL`s aka int32s for these types. `openVersion2` is used for translating // `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. type openVersion2 struct { getInfoOnly int32 @@ -87,9 +87,10 @@ type AttachVirtualDiskParameters struct { } const ( + //revive:disable-next-line:var-naming ALL_CAPS VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 - // Access Mask for opening a VHD + // Access Mask for opening a VHD. VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 @@ -101,7 +102,7 @@ const ( VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 - // Flags for creating a VHD + // Flags for creating a VHD. 
CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 @@ -109,12 +110,12 @@ const ( CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 - CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 + CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 //revive:disable-line:var-naming VHD, not Vhd CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 - CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 + CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 //revive:disable-line:var-naming PMEM, not Pmem CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 - // Flags for opening a VHD + // Flags for opening a VHD. OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 @@ -127,7 +128,7 @@ const ( OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 - // Flags for attaching a VHD + // Flags for attaching a VHD. AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 @@ -140,12 +141,14 @@ const ( AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 - // Flags for detaching a VHD + // Flags for detaching a VHD. DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 ) // CreateVhdx is a helper function to create a simple vhdx file at the given path using // default values. +// +//revive:disable-next-line:var-naming VHDX, not Vhdx func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { params := CreateVirtualDiskParameters{ Version: 2, @@ -172,6 +175,8 @@ func DetachVirtualDisk(handle syscall.Handle) (err error) { } // DetachVhd detaches a vhd found at `path`. +// +//revive:disable-next-line:var-naming VHD, not Vhd func DetachVhd(path string) error { handle, err := OpenVirtualDisk( path, @@ -181,12 +186,16 @@ func DetachVhd(path string) error { if err != nil { return err } - defer syscall.CloseHandle(handle) + defer syscall.CloseHandle(handle) //nolint:errcheck return DetachVirtualDisk(handle) } // AttachVirtualDisk attaches a virtual hard disk for use. -func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { +func AttachVirtualDisk( + handle syscall.Handle, + attachVirtualDiskFlag AttachVirtualDiskFlag, + parameters *AttachVirtualDiskParameters, +) (err error) { // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. if err := attachVirtualDisk( handle, @@ -203,6 +212,8 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua // AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 // of the ATTACH_VIRTUAL_DISK_PARAMETERS. 
+// +//revive:disable-next-line:var-naming VHD, not Vhd func AttachVhd(path string) (err error) { handle, err := OpenVirtualDisk( path, @@ -213,7 +224,7 @@ func AttachVhd(path string) (err error) { return err } - defer syscall.CloseHandle(handle) + defer syscall.CloseHandle(handle) //nolint:errcheck params := AttachVirtualDiskParameters{Version: 2} if err := AttachVirtualDisk( handle, @@ -226,7 +237,11 @@ func AttachVhd(path string) (err error) { } // OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. -func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { +func OpenVirtualDisk( + vhdPath string, + virtualDiskAccessMask VirtualDiskAccessMask, + openVirtualDiskFlags VirtualDiskFlag, +) (syscall.Handle, error) { parameters := OpenVirtualDiskParameters{Version: 2} handle, err := OpenVirtualDiskWithParameters( vhdPath, @@ -241,7 +256,12 @@ func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask } // OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. -func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { +func OpenVirtualDiskWithParameters( + vhdPath string, + virtualDiskAccessMask VirtualDiskAccessMask, + openVirtualDiskFlags VirtualDiskFlag, + parameters *OpenVirtualDiskParameters, +) (syscall.Handle, error) { var ( handle syscall.Handle defaultType VirtualStorageType @@ -279,7 +299,12 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual } // CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. -func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { +func CreateVirtualDisk( + path string, + virtualDiskAccessMask VirtualDiskAccessMask, + createVirtualDiskFlags CreateVirtualDiskFlag, + parameters *CreateVirtualDiskParameters, +) (syscall.Handle, error) { var ( handle syscall.Handle defaultType VirtualStorageType @@ -323,6 +348,8 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { } // CreateDiffVhd is a helper function to create a differencing virtual disk. +// +//revive:disable-next-line:var-naming VHD, not Vhd func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { // Setting `ParentPath` is how to signal to create a differencing disk. createParams := &CreateVirtualDiskParameters{ diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go index 1d7498db3be..d0e917d2be3 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. 
package vhd diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 176ff75e320..83f45a1351b 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package winio @@ -47,9 +49,11 @@ var ( procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") @@ -74,7 +78,6 @@ var ( procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") - procbind = modws2_32.NewProc("bind") ) func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { @@ -123,6 +126,14 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision return } +func convertStringSidToSid(str *uint16, sid **byte) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func getSecurityDescriptorLength(sd uintptr) (len uint32) { r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) len = uint32(r0) @@ -154,6 +165,14 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS return } +func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(systemName) @@ -380,25 +399,25 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro return } -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition 
uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { +func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlNtStatusToDosError(status ntstatus) (winerr error) { +func rtlNtStatusToDosError(status ntStatus) (winerr error) { r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) if r0 != 0 { winerr = syscall.Errno(r0) @@ -417,11 +436,3 @@ func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint } return } - -func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socketError { - err = errnoErr(e1) - } - return -} diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore deleted file mode 100644 index 748e4c8073c..00000000000 --- a/vendor/github.com/PuerkitoBio/purell/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.sublime-* -.DS_Store -*.swp -*.swo -tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index cf31e6af6d5..00000000000 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - "1.11.x" - - tip diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md deleted file mode 100644 index 07de0c49866..00000000000 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ /dev/null @@ -1,188 +0,0 @@ -# Purell - -Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... - -Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. 
- -[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) - -## Install - -`go get github.com/PuerkitoBio/purell` - -## Changelog - -* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). -* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). -* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). -* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). -* **v0.2.0** : Add benchmarks, Attempt IDN support. -* **v0.1.0** : Initial release. - -## Examples - -From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): - -```go -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} -``` - -## API - -As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: - -```go -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". - - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) -``` - -For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. - -The [full godoc reference is available on gopkgdoc][godoc]. 
- -Some things to note: - -* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. - -* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): - - %24 -> $ - - %26 -> & - - %2B-%3B -> +,-./0123456789:; - - %3D -> = - - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ - - %5F -> _ - - %61-%7A -> abcdefghijklmnopqrstuvwxyz - - %7E -> ~ - - -* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). - -* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. - -* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. - -### Safe vs Usually Safe vs Unsafe - -Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. - -Consider the following URL: - -`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -Normalizing with the `FlagsSafe` gives: - -`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -With the `FlagsUsuallySafeGreedy`: - -`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` - -And with `FlagsUnsafeGreedy`: - -`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` - -## TODOs - -* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. - -## Thanks / Contributions - -@rogpeppe -@jehiah -@opennota -@pchristopher1275 -@zenovich -@beeker1121 - -## License - -The [BSD 3-Clause license][bsd]. - -[bsd]: http://opensource.org/licenses/BSD-3-Clause -[wiki]: http://en.wikipedia.org/wiki/URL_normalization -[rfc]: http://tools.ietf.org/html/rfc3986#section-6 -[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell -[pr5]: https://github.com/PuerkitoBio/purell/pull/5 -[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go deleted file mode 100644 index 6d0fc190a18..00000000000 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Package purell offers URL normalization as described on the wikipedia page: -http://en.wikipedia.org/wiki/URL_normalization -*/ -package purell - -import ( - "bytes" - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/PuerkitoBio/urlesc" - "golang.org/x/net/idna" - "golang.org/x/text/unicode/norm" - "golang.org/x/text/width" -) - -// A set of normalization flags determines how a URL will -// be normalized. 
-type NormalizationFlags uint - -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) - -const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) -var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) -var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) -var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) -var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) -var rxEmptyPort = regexp.MustCompile(`:+$`) - -// Map of flags to implementation function. -// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically -// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. - -// Since maps have undefined traversing order, make a slice of ordered keys -var flagsOrder = []NormalizationFlags{ - FlagLowercaseScheme, - FlagLowercaseHost, - FlagRemoveDefaultPort, - FlagRemoveDirectoryIndex, - FlagRemoveDotSegments, - FlagRemoveFragment, - FlagForceHTTP, // Must be after remove default port (because https=443/http=80) - FlagRemoveDuplicateSlashes, - FlagRemoveWWW, - FlagAddWWW, - FlagSortQuery, - FlagDecodeDWORDHost, - FlagDecodeOctalHost, - FlagDecodeHexHost, - FlagRemoveUnnecessaryHostDots, - FlagRemoveEmptyPortSeparator, - FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last - FlagAddTrailingSlash, -} - -// ... 
and then the map, where order is unimportant -var flags = map[NormalizationFlags]func(*url.URL){ - FlagLowercaseScheme: lowercaseScheme, - FlagLowercaseHost: lowercaseHost, - FlagRemoveDefaultPort: removeDefaultPort, - FlagRemoveDirectoryIndex: removeDirectoryIndex, - FlagRemoveDotSegments: removeDotSegments, - FlagRemoveFragment: removeFragment, - FlagForceHTTP: forceHTTP, - FlagRemoveDuplicateSlashes: removeDuplicateSlashes, - FlagRemoveWWW: removeWWW, - FlagAddWWW: addWWW, - FlagSortQuery: sortQuery, - FlagDecodeDWORDHost: decodeDWORDHost, - FlagDecodeOctalHost: decodeOctalHost, - FlagDecodeHexHost: decodeHexHost, - FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, - FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, - FlagRemoveTrailingSlash: removeTrailingSlash, - FlagAddTrailingSlash: addTrailingSlash, -} - -// MustNormalizeURLString returns the normalized string, and panics if an error occurs. -// It takes an URL string as input, as well as the normalization flags. -func MustNormalizeURLString(u string, f NormalizationFlags) string { - result, e := NormalizeURLString(u, f) - if e != nil { - panic(e) - } - return result -} - -// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. -// It takes an URL string as input, as well as the normalization flags. -func NormalizeURLString(u string, f NormalizationFlags) (string, error) { - parsed, err := url.Parse(u) - if err != nil { - return "", err - } - - if f&FlagLowercaseHost == FlagLowercaseHost { - parsed.Host = strings.ToLower(parsed.Host) - } - - // The idna package doesn't fully conform to RFC 5895 - // (https://tools.ietf.org/html/rfc5895), so we do it here. - // Taken from Go 1.8 cycle source, courtesy of bradfitz. - // TODO: Remove when (if?) idna package conforms to RFC 5895. - parsed.Host = width.Fold.String(parsed.Host) - parsed.Host = norm.NFC.String(parsed.Host) - if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { - return "", err - } - - return NormalizeURL(parsed, f), nil -} - -// NormalizeURL returns the normalized string. -// It takes a parsed URL object as input, as well as the normalization flags. -func NormalizeURL(u *url.URL, f NormalizationFlags) string { - for _, k := range flagsOrder { - if f&k == k { - flags[k](u) - } - } - return urlesc.Escape(u) -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { - return "" - } - return val - }) - } -} - -func removeTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if strings.HasSuffix(u.Path, "/") { - u.Path = u.Path[:l-1] - } - } else if l = len(u.Host); l > 0 { - if strings.HasSuffix(u.Host, "/") { - u.Host = u.Host[:l-1] - } - } -} - -func addTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } else if l = len(u.Host); l > 0 { - if !strings.HasSuffix(u.Host, "/") { - u.Host += "/" - } - } -} - -func removeDotSegments(u *url.URL) { - if len(u.Path) > 0 { - var dotFree []string - var lastIsDot bool - - sections := strings.Split(u.Path, "/") - for _, s := range sections { - if s == ".." 
{ - if len(dotFree) > 0 { - dotFree = dotFree[:len(dotFree)-1] - } - } else if s != "." { - dotFree = append(dotFree, s) - } - lastIsDot = (s == "." || s == "..") - } - // Special case if host does not end with / and new path does not begin with / - u.Path = strings.Join(dotFree, "/") - if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - // Special case if the last segment was a dot, make sure the path ends with a slash - if lastIsDot && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } -} - -func removeDirectoryIndex(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") - } -} - -func removeFragment(u *url.URL) { - u.Fragment = "" -} - -func forceHTTP(u *url.URL) { - if strings.ToLower(u.Scheme) == "https" { - u.Scheme = "http" - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} - -func removeWWW(u *url.URL) { - if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = u.Host[4:] - } -} - -func addWWW(u *url.URL) { - if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = "www." + u.Host - } -} - -func sortQuery(u *url.URL) { - q := u.Query() - - if len(q) > 0 { - arKeys := make([]string, len(q)) - i := 0 - for k := range q { - arKeys[i] = k - i++ - } - sort.Strings(arKeys) - buf := new(bytes.Buffer) - for _, k := range arKeys { - sort.Strings(q[k]) - for _, v := range q[k] { - if buf.Len() > 0 { - buf.WriteRune('&') - } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) - } - } - - // Rebuild the raw query string - u.RawQuery = buf.String() - } -} - -func decodeDWORDHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { - var parts [4]int64 - - dword, _ := strconv.ParseInt(matches[1], 10, 0) - for i, shift := range []uint{24, 16, 8, 0} { - parts[i] = dword >> shift & 0xFF - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) - } - } -} - -func decodeOctalHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { - var parts [4]int64 - - for i := 1; i <= 4; i++ { - parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) - } - } -} - -func decodeHexHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { - // Conversion is safe because of regex validation - parsed, _ := strconv.ParseInt(matches[1], 16, 0) - // Set host as DWORD (base 10) encoded host - u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) - // The rest is the same as decoding a DWORD host - decodeDWORDHost(u) - } - } -} - -func removeUnncessaryHostDots(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { - // Trim the leading and trailing dots - u.Host = strings.Trim(matches[1], ".") - if len(matches) > 2 { - u.Host += matches[2] - } - } - } -} - -func removeEmptyPortSeparator(u *url.URL) { - if len(u.Host) > 0 { - u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") - } -} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225f91e..00000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ 
/dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . - -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a5396..00000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). - -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go deleted file mode 100644 index 1b84624594d..00000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package urlesc implements query escaping as per RFC 3986. -// It contains some parts of the net/url package, modified so as to allow -// some reserved characters incorrectly escaped by net/url. -// See https://github.com/golang/go/issues/5684 -package urlesc - -import ( - "bytes" - "net/url" - "strings" -) - -type encoding int - -const ( - encodePath encoding = 1 + iota - encodeUserPassword - encodeQueryComponent - encodeFragment -) - -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return false - } - - switch c { - case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) - return false - - // §2.2 Reserved characters (reserved) - case ':', '/', '?', '#', '[', ']', '@', // gen-delims - '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows sub-delims and : @. - // '/', '[' and ']' can be used to assign meaning to individual path - // segments. This package only manipulates the path as a whole, - // so we allow those as well. That leaves only ? and # to escape. - return c == '?' || c == '#' - - case encodeUserPassword: // §3.2.1 - // The RFC allows : and sub-delims in - // userinfo. The parsing of userinfo treats ':' as special so we must escape - // all the gen-delims. - return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' - - case encodeQueryComponent: // §3.4 - // The RFC allows / and ?. - return c != '/' && c != '?' - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing but # - return c == '#' - } - } - - // Everything else must be escaped. - return true -} - -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. 
-func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - -func escape(s string, mode encoding) string { - spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c, mode) { - if c == ' ' && mode == encodeQueryComponent { - spaceCount++ - } else { - hexCount++ - } - } - } - - if spaceCount == 0 && hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { - case c == ' ' && mode == encodeQueryComponent: - t[j] = '+' - j++ - case shouldEscape(c, mode): - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - default: - t[j] = s[i] - j++ - } - } - return string(t) -} - -var uiReplacer = strings.NewReplacer( - "%21", "!", - "%27", "'", - "%28", "(", - "%29", ")", - "%2A", "*", -) - -// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. -func unescapeUserinfo(s string) string { - return uiReplacer.Replace(s) -} - -// Escape reassembles the URL into a valid URL string. -// The general form of the result is one of: -// -// scheme:opaque -// scheme://userinfo@host/path?query#fragment -// -// If u.Opaque is non-empty, String uses the first form; -// otherwise it uses the second form. -// -// In the second form, the following rules apply: -// - if u.Scheme is empty, scheme: is omitted. -// - if u.User is nil, userinfo@ is omitted. -// - if u.Host is empty, host/ is omitted. -// - if u.Scheme and u.Host are empty and u.User is nil, -// the entire scheme://userinfo@host/ is omitted. -// - if u.Host is non-empty and u.Path begins with a /, -// the form host/path does not add its own /. -// - if u.RawQuery is empty, ?query is omitted. -// - if u.Fragment is empty, #fragment is omitted. -func Escape(u *url.URL) string { - var buf bytes.Buffer - if u.Scheme != "" { - buf.WriteString(u.Scheme) - buf.WriteByte(':') - } - if u.Opaque != "" { - buf.WriteString(u.Opaque) - } else { - if u.Scheme != "" || u.Host != "" || u.User != nil { - buf.WriteString("//") - if ui := u.User; ui != nil { - buf.WriteString(unescapeUserinfo(ui.String())) - buf.WriteByte('@') - } - if h := u.Host; h != "" { - buf.WriteString(h) - } - } - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - buf.WriteString(escape(u.Path, encodePath)) - } - if u.RawQuery != "" { - buf.WriteByte('?') - buf.WriteString(u.RawQuery) - } - if u.Fragment != "" { - buf.WriteByte('#') - buf.WriteString(escape(u.Fragment, encodeFragment)) - } - return buf.String() -} diff --git a/vendor/github.com/blang/semver/v4/LICENSE b/vendor/github.com/blang/semver/v4/LICENSE new file mode 100644 index 00000000000..5ba5c86fcb0 --- /dev/null +++ b/vendor/github.com/blang/semver/v4/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/blang/semver/v4/json.go b/vendor/github.com/blang/semver/v4/json.go new file mode 100644 index 00000000000..a74bf7c4494 --- /dev/null +++ b/vendor/github.com/blang/semver/v4/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/v4/range.go b/vendor/github.com/blang/semver/v4/range.go new file mode 100644 index 00000000000..95f7139b977 --- /dev/null +++ b/vendor/github.com/blang/semver/v4/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1")) // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. +func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned.
+// +// Valid ranges are: +// - "<1.0.0" +// - "<=1.0.0" +// - ">1.0.0" +// - ">=1.0.0" +// - "1.0.0", "=1.0.0", "==1.0.0" +// - "!1.0.0", "!=1.0.0" +// +// A Range can consist of multiple ranges separated by space: +// Ranges can be linked by logical AND: +// - ">1.0.0 <2.0.0" would match versions that satisfy both ranges, such as "1.1.1" and "1.8.7", but not "1.0.0" or "2.0.0" +// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 +// +// Ranges can also be linked by logical OR: +// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" +// +// AND has a higher precedence than OR. It's not possible to use brackets. +// +// Ranges can be combined by both AND and OR: +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// It checks for invalid positions of the operator and returns an +// error if one is found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes an operator and a version string and +// builds a versionRange, or returns an error if either cannot be parsed. 
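+// +// For example (editor's illustrative note, not in the upstream file): +// buildVersionRange(">=", "1.2.3") yields a versionRange whose rangeFunc reports +// true exactly when v.Compare(MustParse("1.2.3")) >= 0.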
+func buildVersionRange(opStr, vStr string) (*versionRange, error) { + c := parseComparator(opStr) + if c == nil { + return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) + } + v, err := Parse(vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) + } + + return &versionRange{ + v: v, + c: c, + }, nil + +} + +// inArray checks if a byte is contained in an array of bytes +func inArray(s byte, list []byte) bool { + for _, el := range list { + if el == s { + return true + } + } + return false +} + +// splitAndTrim splits a range string by spaces and strips any remaining whitespace +func splitAndTrim(s string) (result []string) { + last := 0 + var lastChar byte + excludeFromSplit := []byte{'>', '<', '='} + for i := 0; i < len(s); i++ { + if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { + if last < i-1 { + result = append(result, s[last:i]) + } + last = i + 1 + } else if s[i] != ' ' { + lastChar = s[i] + } + } + if last < len(s)-1 { + result = append(result, s[last:]) + } + + for i, v := range result { + result[i] = strings.Replace(v, " ", "", -1) + } + + // parts := strings.Split(s, " ") + // for _, x := range parts { + // if s := strings.TrimSpace(x); len(s) != 0 { + // result = append(result, s) + // } + // } + return +} + +// splitComparatorVersion splits the comparator from the version. +// Input must be free of leading or trailing spaces. +func splitComparatorVersion(s string) (string, string, error) { + i := strings.IndexFunc(s, unicode.IsDigit) + if i == -1 { + return "", "", fmt.Errorf("Could not get version from string: %q", s) + } + return strings.TrimSpace(s[0:i]), s[i:], nil +} + +// getWildcardType will return the type of wildcard that the +// passed version contains +func getWildcardType(vStr string) wildcardType { + parts := strings.Split(vStr, ".") + nparts := len(parts) + wildcard := parts[nparts-1] + + possibleWildcardType := wildcardTypefromInt(nparts) + if wildcard == "x" { + return possibleWildcardType + } + + return noneWildcard +} + +// createVersionFromWildcard will convert a wildcard version +// into a regular version, replacing 'x's with '0's, handling +// special cases like '1.x.x' and '1.x' +func createVersionFromWildcard(vStr string) string { + // handle 1.x.x + vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) + vStr2 = strings.Replace(vStr2, ".x", ".0", 1) + parts := strings.Split(vStr2, ".") + + // handle 1.x + if len(parts) == 2 { + return vStr2 + ".0" + } + + return vStr2 +} + +// incrementMajorVersion will increment the major version +// of the passed version +func incrementMajorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[0]) + if err != nil { + return "", err + } + parts[0] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// incrementMinorVersion will increment the minor version +// of the passed version +func incrementMinorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[1]) + if err != nil { + return "", err + } + parts[1] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// expandWildcardVersion will expand wildcards inside versions +// following these rules: +// +// * when dealing with patch wildcards: +// >= 1.2.x will become >= 1.2.0 +// <= 1.2.x will become < 1.3.0 +// > 1.2.x will become >= 1.3.0 +// < 1.2.x will become < 1.2.0 +// != 1.2.x will become < 1.2.0 >= 
1.3.0 +// +// * when dealing with minor wildcards: +// >= 1.x will become >= 1.0.0 +// <= 1.x will become < 2.0.0 +// > 1.x will become >= 2.0.0 +// < 1.0 will become < 1.0.0 +// != 1.x will become < 1.0.0 >= 2.0.0 +// +// * when dealing with wildcards without +// version operator: +// 1.2.x will become >= 1.2.0 < 1.3.0 +// 1.x will become >= 1.0.0 < 2.0.0 +func expandWildcardVersion(parts [][]string) ([][]string, error) { + var expandedParts [][]string + for _, p := range parts { + var newParts []string + for _, ap := range p { + if strings.Contains(ap, "x") { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + + versionWildcardType := getWildcardType(vStr) + flatVersion := createVersionFromWildcard(vStr) + + var resultOperator string + var shouldIncrementVersion bool + switch opStr { + case ">": + resultOperator = ">=" + shouldIncrementVersion = true + case ">=": + resultOperator = ">=" + case "<": + resultOperator = "<" + case "<=": + resultOperator = "<" + shouldIncrementVersion = true + case "", "=", "==": + newParts = append(newParts, ">="+flatVersion) + resultOperator = "<" + shouldIncrementVersion = true + case "!=", "!": + newParts = append(newParts, "<"+flatVersion) + resultOperator = ">=" + shouldIncrementVersion = true + } + + var resultVersion string + if shouldIncrementVersion { + switch versionWildcardType { + case patchWildcard: + resultVersion, _ = incrementMinorVersion(flatVersion) + case minorWildcard: + resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/vendor/github.com/blang/semver/v4/semver.go b/vendor/github.com/blang/semver/v4/semver.go new file mode 100644 index 00000000000..307de610f92 --- /dev/null +++ b/vendor/github.com/blang/semver/v4/semver.go @@ -0,0 +1,476 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string //No Precedence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) + + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...) 
+ } + } + + if len(v.Build) > 0 { + b = append(b, '+') + b = append(b, v.Build[0]...) + + for _, build := range v.Build[1:] { + b = append(b, '.') + b = append(b, build...) + } + } + + return string(b) +} + +// FinalizeVersion discards prerelease and build number and only returns +// major, minor and patch number. +func (v Version) FinalizeVersion() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + return string(b) +} + +// Equals checks if v is equal to o. +func (v Version) Equals(o Version) bool { + return (v.Compare(o) == 0) +} + +// EQ checks if v is equal to o. +func (v Version) EQ(o Version) bool { + return (v.Compare(o) == 0) +} + +// NE checks if v is not equal to o. +func (v Version) NE(o Version) bool { + return (v.Compare(o) != 0) +} + +// GT checks if v is greater than o. +func (v Version) GT(o Version) bool { + return (v.Compare(o) == 1) +} + +// GTE checks if v is greater than or equal to o. +func (v Version) GTE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// GE checks if v is greater than or equal to o. +func (v Version) GE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// LT checks if v is less than o. +func (v Version) LT(o Version) bool { + return (v.Compare(o) == -1) +} + +// LTE checks if v is less than or equal to o. +func (v Version) LTE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// LE checks if v is less than or equal to o. +func (v Version) LE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// Compare compares Versions v to o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v Version) Compare(o Version) int { + if v.Major != o.Major { + if v.Major > o.Major { + return 1 + } + return -1 + } + if v.Minor != o.Minor { + if v.Minor > o.Minor { + return 1 + } + return -1 + } + if v.Patch != o.Patch { + if v.Patch > o.Patch { + return 1 + } + return -1 + } + + // Quick comparison if a version has no prerelease versions + if len(v.Pre) == 0 && len(o.Pre) == 0 { + return 0 + } else if len(v.Pre) == 0 && len(o.Pre) > 0 { + return 1 + } else if len(v.Pre) > 0 && len(o.Pre) == 0 { + return -1 + } + + i := 0 + for ; i < len(v.Pre) && i < len(o.Pre); i++ { + if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { + continue + } else if comp == 1 { + return 1 + } else { + return -1 + } + } + + // If all compared prerelease identifiers are equal, the version with additional identifiers is greater + if i == len(v.Pre) && i == len(o.Pre) { + return 0 + } else if i == len(v.Pre) && i < len(o.Pre) { + return -1 + } else { + return 1 + } + +} + +// IncrementPatch increments the patch version +func (v *Version) IncrementPatch() error { + v.Patch++ + return nil +} + +// IncrementMinor increments the minor version +func (v *Version) IncrementMinor() error { + v.Minor++ + v.Patch = 0 + return nil +} + +// IncrementMajor increments the major version +func (v *Version) IncrementMajor() error { + v.Major++ + v.Minor = 0 + v.Patch = 0 + return nil +} + +// Validate validates v and returns an error in case of an invalid version +func (v Version) Validate() error { + // Major, Minor, Patch already validated using uint64 + + for _, pre := range v.Pre { + if !pre.IsNum { //Numeric prerelease versions already uint64 + if len(pre.VersionStr) == 0 { + return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) + } + if !containsOnly(pre.VersionStr, alphanum) { + return fmt.Errorf("Invalid character(s) found in prerelease 
%q", pre.VersionStr) + } + } + } + + for _, build := range v.Build { + if len(build) == 0 { + return fmt.Errorf("Build meta data can not be empty %q", build) + } + if !containsOnly(build, alphanum) { + return fmt.Errorf("Invalid character(s) found in build meta data %q", build) + } + } + + return nil +} + +// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error +func New(s string) (*Version, error) { + v, err := Parse(s) + vp := &v + return vp, err +} + +// Make is an alias for Parse, parses version string and returns a validated Version or error +func Make(s string) (Version, error) { + return Parse(s) +} + +// ParseTolerant allows for certain version specifications that do not strictly adhere to semver +// specs to be parsed by this library. It does so by normalizing versions before passing them to +// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions +// with only major and minor components specified, and removes leading 0s. +func ParseTolerant(s string) (Version, error) { + s = strings.TrimSpace(s) + s = strings.TrimPrefix(s, "v") + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + // Remove leading zeros. + for i, p := range parts { + if len(p) > 1 { + p = strings.TrimLeft(p, "0") + if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") { + p = "0" + p + } + parts[i] = p + } + } + // Fill up shortened versions. + if len(parts) < 3 { + if strings.ContainsAny(parts[len(parts)-1], "+-") { + return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") + } + for len(parts) < 3 { + parts = append(parts, "0") + } + } + s = strings.Join(parts, ".") + + return Parse(s) +} + +// Parse parses version string and returns a validated Version or error +func Parse(s string) (Version, error) { + if len(s) == 0 { + return Version{}, errors.New("Version string empty") + } + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) != 3 { + return Version{}, errors.New("No Major.Minor.Patch elements found") + } + + // Major + if !containsOnly(parts[0], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) + } + if hasLeadingZeroes(parts[0]) { + return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) + } + major, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return Version{}, err + } + + // Minor + if !containsOnly(parts[1], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) + } + if hasLeadingZeroes(parts[1]) { + return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) + } + minor, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return Version{}, err + } + + v := Version{} + v.Major = major + v.Minor = minor + + var build, prerelease []string + patchStr := parts[2] + + if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { + build = strings.Split(patchStr[buildIndex+1:], ".") + patchStr = patchStr[:buildIndex] + } + + if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { + prerelease = strings.Split(patchStr[preIndex+1:], ".") + patchStr = patchStr[:preIndex] + } + + if !containsOnly(patchStr, numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) + } + if hasLeadingZeroes(patchStr) { + return Version{}, fmt.Errorf("Patch number must not 
contain leading zeroes %q", patchStr) + } + patch, err := strconv.ParseUint(patchStr, 10, 64) + if err != nil { + return Version{}, err + } + + v.Patch = patch + + // Prerelease + for _, prstr := range prerelease { + parsedPR, err := NewPRVersion(prstr) + if err != nil { + return Version{}, err + } + v.Pre = append(v.Pre, parsedPR) + } + + // Build meta data + for _, str := range build { + if len(str) == 0 { + return Version{}, errors.New("Build meta data is empty") + } + if !containsOnly(str, alphanum) { + return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) + } + v.Build = append(v.Build, str) + } + + return v, nil +} + +// MustParse is like Parse but panics if the version cannot be parsed. +func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} + +// FinalizeVersion returns the major, minor and patch number only and discards +// prerelease and build number. 
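+// +// Example (editor's note, not in the upstream file): +// FinalizeVersion("1.2.3-rc.1+build.5") returns "1.2.3" and a nil error.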
+func FinalizeVersion(s string) (string, error) { + v, err := Parse(s) + if err != nil { + return "", err + } + v.Pre = nil + v.Build = nil + + finalVer := v.String() + return finalVer, nil +} diff --git a/vendor/github.com/blang/semver/v4/sort.go b/vendor/github.com/blang/semver/v4/sort.go new file mode 100644 index 00000000000..e18f880826a --- /dev/null +++ b/vendor/github.com/blang/semver/v4/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. +type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/blang/semver/v4/sql.go b/vendor/github.com/blang/semver/v4/sql.go new file mode 100644 index 00000000000..db958134f3b --- /dev/null +++ b/vendor/github.com/blang/semver/v4/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("version.Scan: cannot convert %T to string", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md index fab974b7f34..4b0a5ff5818 100644 --- a/vendor/github.com/chzyer/readline/README.md +++ b/vendor/github.com/chzyer/readline/README.md @@ -11,7 +11,7 @@

-A powerful readline library in `Linux` `macOS` `Windows` `Solaris` +A powerful readline library in `Linux` `macOS` `Windows` `Solaris` `AIX` ## Guide diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go index 4c31624f806..b60939a91f1 100644 --- a/vendor/github.com/chzyer/readline/operation.go +++ b/vendor/github.com/chzyer/readline/operation.go @@ -109,10 +109,12 @@ func (o *Operation) ioloop() { keepInSearchMode := false keepInCompleteMode := false r := o.t.ReadRune() + if o.GetConfig().FuncFilterInputRune != nil { var process bool r, process = o.GetConfig().FuncFilterInputRune(r) if !process { + o.t.KickRead() o.buf.Refresh(nil) // to refresh the line continue // ignore this rune } @@ -434,6 +436,10 @@ func (o *Operation) Slice() ([]byte, error) { } func (o *Operation) Close() { + select { + case o.errchan <- io.EOF: + default: + } o.history.Close() } diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go index 0e7aca06d5a..63b9171012e 100644 --- a/vendor/github.com/chzyer/readline/readline.go +++ b/vendor/github.com/chzyer/readline/readline.go @@ -17,7 +17,9 @@ // package readline -import "io" +import ( + "io" +) type Instance struct { Config *Config @@ -270,14 +272,24 @@ func (i *Instance) ReadSlice() ([]byte, error) { } // we must make sure that call Close() before process exit. +// If there is a pending read operation, it will be interrupted; +// you can therefore capture an exit signal and call Instance.Close(), which is thread-safe. func (i *Instance) Close() error { + i.Config.Stdin.Close() + i.Operation.Close() if err := i.Terminal.Close(); err != nil { return err } - i.Config.Stdin.Close() - i.Operation.Close() return nil } + +// Call CaptureExitSignal when you want readline to exit gracefully. +func (i *Instance) CaptureExitSignal() { + CaptureExitSignal(func() { + i.Close() + }) +} + func (i *Instance) Clean() { i.Operation.Clean() } diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go index 81d2da50ccb..d95df1e36b6 100644 --- a/vendor/github.com/chzyer/readline/runebuf.go +++ b/vendor/github.com/chzyer/readline/runebuf.go @@ -35,7 +35,7 @@ type RuneBuffer struct { sync.Mutex } -func (r* RuneBuffer) pushKill(text []rune) { +func (r *RuneBuffer) pushKill(text []rune) { r.lastKill = append([]rune{}, text...) } @@ -221,7 +221,7 @@ func (r *RuneBuffer) DeleteWord() { } for i := init + 1; i < len(r.buf); i++ { if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.pushKill(r.buf[r.idx:i-1]) + r.pushKill(r.buf[r.idx : i-1]) r.Refresh(func() { r.buf = append(r.buf[:r.idx], r.buf[i-1:]...) }) @@ -350,7 +350,7 @@ func (r *RuneBuffer) Yank() { return } r.Refresh(func() { - buf := make([]rune, 0, len(r.buf) + len(r.lastKill)) + buf := make([]rune, 0, len(r.buf)+len(r.lastKill)) buf = append(buf, r.buf[:r.idx]...) buf = append(buf, r.lastKill...) buf = append(buf, r.buf[r.idx:]...) diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go index 133993ca8ea..ea5db9346e8 100644 --- a/vendor/github.com/chzyer/readline/term.go +++ b/vendor/github.com/chzyer/readline/term.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd os400 solaris // Package terminal provides support functions for dealing with terminals, as // commonly found on UNIX systems. diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_nosyscall6.go similarity index 96% rename from vendor/github.com/chzyer/readline/term_solaris.go rename to vendor/github.com/chzyer/readline/term_nosyscall6.go index 4c27273c7ab..df923393790 100644 --- a/vendor/github.com/chzyer/readline/term_solaris.go +++ b/vendor/github.com/chzyer/readline/term_nosyscall6.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build solaris +// +build aix os400 solaris package readline diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go index 1078631c14a..38413d0cf68 100644 --- a/vendor/github.com/chzyer/readline/terminal.go +++ b/vendor/github.com/chzyer/readline/terminal.go @@ -125,6 +125,7 @@ func (t *Terminal) ioloop() { var ( isEscape bool isEscapeEx bool + isEscapeSS3 bool expectNextChar bool ) @@ -152,9 +153,15 @@ func (t *Terminal) ioloop() { if isEscape { isEscape = false if r == CharEscapeEx { + // ^][ expectNextChar = true isEscapeEx = true continue + } else if r == CharO { + // ^]O + expectNextChar = true + isEscapeSS3 = true + continue } r = escapeKey(r, buf) } else if isEscapeEx { @@ -177,6 +184,15 @@ func (t *Terminal) ioloop() { expectNextChar = true continue } + } else if isEscapeSS3 { + isEscapeSS3 = false + if key := readEscKey(r, buf); key != nil { + r = escapeSS3Key(key) + } + if r == 0 { + expectNextChar = true + continue + } } expectNextChar = true diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go index af4e005216f..0706dd4ec51 100644 --- a/vendor/github.com/chzyer/readline/utils.go +++ b/vendor/github.com/chzyer/readline/utils.go @@ -6,9 +6,11 @@ import ( "container/list" "fmt" "os" + "os/signal" "strconv" "strings" "sync" + "syscall" "time" "unicode" ) @@ -41,6 +43,7 @@ const ( CharCtrlY = 25 CharCtrlZ = 26 CharEsc = 27 + CharO = 79 CharEscapeEx = 91 CharBackspace = 127 ) @@ -121,6 +124,27 @@ func escapeExKey(key *escapeKeyPair) rune { return r } +// translate EscOX SS3 codes for up/down/etc. +func escapeSS3Key(key *escapeKeyPair) rune { + var r rune + switch key.typ { + case 'D': + r = CharBackward + case 'C': + r = CharForward + case 'A': + r = CharPrev + case 'B': + r = CharNext + case 'H': + r = CharLineStart + case 'F': + r = CharLineEnd + default: + } + return r +} + type escapeKeyPair struct { attr string typ rune @@ -275,3 +299,13 @@ func Debug(o ...interface{}) { fmt.Fprintln(f, o...) 
f.Close() } + +func CaptureExitSignal(f func()) { + cSignal := make(chan os.Signal, 1) + signal.Notify(cSignal, os.Interrupt, syscall.SIGTERM) + go func() { + for range cSignal { + f() + } + }() +} diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go index f88dac97bd7..fc49492326e 100644 --- a/vendor/github.com/chzyer/readline/utils_unix.go +++ b/vendor/github.com/chzyer/readline/utils_unix.go @@ -1,4 +1,4 @@ -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd os400 solaris package readline diff --git a/vendor/github.com/docker/go-metrics/LICENSE b/vendor/github.com/container-orchestrated-devices/container-device-interface/LICENSE similarity index 93% rename from vendor/github.com/docker/go-metrics/LICENSE rename to vendor/github.com/container-orchestrated-devices/container-device-interface/LICENSE index 8f3fee627a4..261eeb9e9f8 100644 --- a/vendor/github.com/docker/go-metrics/LICENSE +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/LICENSE @@ -1,7 +1,6 @@ - Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -176,13 +175,24 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2016 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/multierror/multierror.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/multierror/multierror.go new file mode 100644 index 00000000000..07aca4a1d3e --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/multierror/multierror.go @@ -0,0 +1,82 @@ +/* + Copyright © 2022 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package multierror + +import ( + "strings" +) + +// New combines several errors into a single error. 
 Parameters that are nil are +// ignored. If no errors are passed in or all parameters are nil, then the +// result is also nil. +func New(errors ...error) error { + // Filter out nil entries. + numErrors := 0 + for _, err := range errors { + if err != nil { + errors[numErrors] = err + numErrors++ + } + } + if numErrors == 0 { + return nil + } + return multiError(errors[0:numErrors]) +} + +// multiError is the underlying implementation used by New. +// +// Beware that a nil multiError stored in an error interface is not the same as a nil error. +type multiError []error + +// Error returns all individual error strings concatenated with "\n" +func (e multiError) Error() string { + var builder strings.Builder + for i, err := range e { + if i > 0 { + _, _ = builder.WriteString("\n") + } + _, _ = builder.WriteString(err.Error()) + } + return builder.String() +} + +// Append returns a new multi error with all errors concatenated. Errors that are +// multi errors get flattened, and nil errors are ignored. +func Append(err error, errors ...error) error { + var result multiError + if m, ok := err.(multiError); ok { + result = m + } else if err != nil { + result = append(result, err) + } + + for _, e := range errors { + if e == nil { + continue + } + if m, ok := e.(multiError); ok { + result = append(result, m...) + } else { + result = append(result, e) + } + } + if len(result) == 0 { + return nil + } + return result +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/annotations.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/annotations.go new file mode 100644 index 00000000000..1055c7df8fa --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/annotations.go @@ -0,0 +1,139 @@ +/* + Copyright © 2021-2022 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + "strings" + + "github.com/pkg/errors" +) + +const ( + // AnnotationPrefix is the prefix for CDI container annotation keys. + AnnotationPrefix = "cdi.k8s.io/" +) + +// UpdateAnnotations updates annotations with a plugin-specific CDI device +// injection request for the given devices. Upon any error a non-nil error +// is returned and annotations are left intact. By convention plugin should +// be in the format of "vendor.device-type". 
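+// +// Example (editor's illustrative sketch, not part of the upstream file): +// +// anns, err := cdi.UpdateAnnotations(nil, "vendor.device-type", "dev0", []string{"vendor.com/device=dev0"}) +// // on success: anns["cdi.k8s.io/vendor.device-type_dev0"] == "vendor.com/device=dev0"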
+func UpdateAnnotations(annotations map[string]string, plugin string, deviceID string, devices []string) (map[string]string, error) { + key, err := AnnotationKey(plugin, deviceID) + if err != nil { + return annotations, errors.Wrap(err, "CDI annotation failed") + } + if _, ok := annotations[key]; ok { + return annotations, errors.Errorf("CDI annotation failed, key %q used", key) + } + value, err := AnnotationValue(devices) + if err != nil { + return annotations, errors.Wrap(err, "CDI annotation failed") + } + + if annotations == nil { + annotations = make(map[string]string) + } + annotations[key] = value + + return annotations, nil +} + +// ParseAnnotations parses annotations for CDI device injection requests. +// The keys and devices from all such requests are collected into slices +// which are returned as the result. All devices are expected to be fully +// qualified CDI device names. If any device fails this check, empty slices +// are returned along with a non-nil error. The annotations are expected +// to be formatted by, or in a compatible fashion to UpdateAnnotations(). +func ParseAnnotations(annotations map[string]string) ([]string, []string, error) { + var ( + keys []string + devices []string + ) + + for key, value := range annotations { + if !strings.HasPrefix(key, AnnotationPrefix) { + continue + } + for _, d := range strings.Split(value, ",") { + if !IsQualifiedName(d) { + return nil, nil, errors.Errorf("invalid CDI device name %q", d) + } + devices = append(devices, d) + } + keys = append(keys, key) + } + + return keys, devices, nil +} + +// AnnotationKey returns a unique annotation key for a device allocation +// by a K8s device plugin. pluginName should be in the format of +// "vendor.device-type". deviceID is the ID of the device the plugin is +// allocating. It is used to make sure that the generated key is unique +// even if multiple allocations by a single plugin need to be annotated. +func AnnotationKey(pluginName, deviceID string) (string, error) { + const maxNameLen = 63 + + if pluginName == "" { + return "", errors.New("invalid plugin name, empty") + } + if deviceID == "" { + return "", errors.New("invalid deviceID, empty") + } + + name := pluginName + "_" + strings.ReplaceAll(deviceID, "/", "_") + + if len(name) > maxNameLen { + return "", errors.Errorf("invalid plugin+deviceID %q, too long", name) + } + + if c := rune(name[0]); !isAlphaNumeric(c) { + return "", errors.Errorf("invalid name %q, first '%c' should be alphanumeric", + name, c) + } + if len(name) > 2 { + for _, c := range name[1 : len(name)-1] { + switch { + case isAlphaNumeric(c): + case c == '_' || c == '-' || c == '.': + default: + return "", errors.Errorf("invalid name %q, invalid character '%c'", + name, c) + } + } + } + if c := rune(name[len(name)-1]); !isAlphaNumeric(c) { + return "", errors.Errorf("invalid name %q, last '%c' should be alphanumeric", + name, c) + } + + return AnnotationPrefix + name, nil +} + +// AnnotationValue returns an annotation value for the given devices. 
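+// For example (editor's note), AnnotationValue([]string{"vendor.com/device=dev0", "vendor.com/device=dev1"}) +// returns "vendor.com/device=dev0,vendor.com/device=dev1" and a nil error.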
+func AnnotationValue(devices []string) (string, error) { + value, sep := "", "" + for _, d := range devices { + if _, _, _, err := ParseQualifiedName(d); err != nil { + return "", err + } + value += sep + d + sep = "," + } + + return value, nil +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go new file mode 100644 index 00000000000..dee854862de --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go @@ -0,0 +1,572 @@ +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + "io/fs" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + stderr "errors" + + "github.com/container-orchestrated-devices/container-device-interface/internal/multierror" + cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" + "github.com/fsnotify/fsnotify" + oci "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// Option is an option to change some aspect of default CDI behavior. +type Option func(*Cache) error + +// Cache stores CDI Specs loaded from Spec directories. +type Cache struct { + sync.Mutex + specDirs []string + specs map[string][]*Spec + devices map[string]*Device + errors map[string][]error + dirErrors map[string]error + + autoRefresh bool + watch *watch +} + +// WithAutoRefresh returns an option to control automatic Cache refresh. +// By default auto-refresh is enabled, the list of Spec directories are +// monitored and the Cache is automatically refreshed whenever a change +// is detected. This option can be used to disable this behavior when a +// manually refreshed mode is preferable. +func WithAutoRefresh(autoRefresh bool) Option { + return func(c *Cache) error { + c.autoRefresh = autoRefresh + return nil + } +} + +// NewCache creates a new CDI Cache. The cache is populated from a set +// of CDI Spec directories. These can be specified using a WithSpecDirs +// option. The default set of directories is exposed in DefaultSpecDirs. +func NewCache(options ...Option) (*Cache, error) { + c := &Cache{ + autoRefresh: true, + watch: &watch{}, + } + + WithSpecDirs(DefaultSpecDirs...)(c) + c.Lock() + defer c.Unlock() + + return c, c.configure(options...) +} + +// Configure applies options to the Cache. Updates and refreshes the +// Cache if options have changed. +func (c *Cache) Configure(options ...Option) error { + if len(options) == 0 { + return nil + } + + c.Lock() + defer c.Unlock() + + return c.configure(options...) +} + +// Configure the Cache. Start/stop CDI Spec directory watch, refresh +// the Cache if necessary. 
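+// +// Example of driving this through the exported API (editor's illustrative +// sketch, not part of the upstream file): +// +// cache, err := cdi.NewCache(cdi.WithAutoRefresh(false)) +// if err == nil { +// err = cache.Refresh() // manual refresh, since auto-refresh is disabled +// }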
+func (c *Cache) configure(options ...Option) error { + var err error + + for _, o := range options { + if err = o(c); err != nil { + return errors.Wrapf(err, "failed to apply cache options") + } + } + + c.dirErrors = make(map[string]error) + + c.watch.stop() + if c.autoRefresh { + c.watch.setup(c.specDirs, c.dirErrors) + c.watch.start(&c.Mutex, c.refresh, c.dirErrors) + } + c.refresh() + + return nil +} + +// Refresh rescans the CDI Spec directories and refreshes the Cache. +// In manual refresh mode the cache is always refreshed. In auto- +// refresh mode the cache is only refreshed if it is out of date. +func (c *Cache) Refresh() error { + c.Lock() + defer c.Unlock() + + // force a refresh in manual mode + if refreshed, err := c.refreshIfRequired(!c.autoRefresh); refreshed { + return err + } + + // collect and return cached errors, much like refresh() does it + var result error + for _, errors := range c.errors { + result = multierror.Append(result, errors...) + } + return result +} + +// Refresh the Cache by rescanning CDI Spec directories and files. +func (c *Cache) refresh() error { + var ( + specs = map[string][]*Spec{} + devices = map[string]*Device{} + conflicts = map[string]struct{}{} + specErrors = map[string][]error{} + result []error + ) + + // collect errors per spec file path and once globally + collectError := func(err error, paths ...string) { + result = append(result, err) + for _, path := range paths { + specErrors[path] = append(specErrors[path], err) + } + } + // resolve conflicts based on device Spec priority (order of precedence) + resolveConflict := func(name string, dev *Device, old *Device) bool { + devSpec, oldSpec := dev.GetSpec(), old.GetSpec() + devPrio, oldPrio := devSpec.GetPriority(), oldSpec.GetPriority() + switch { + case devPrio > oldPrio: + return false + case devPrio == oldPrio: + devPath, oldPath := devSpec.GetPath(), oldSpec.GetPath() + collectError(errors.Errorf("conflicting device %q (specs %q, %q)", + name, devPath, oldPath), devPath, oldPath) + conflicts[name] = struct{}{} + } + return true + } + + _ = scanSpecDirs(c.specDirs, func(path string, priority int, spec *Spec, err error) error { + path = filepath.Clean(path) + if err != nil { + collectError(errors.Wrapf(err, "failed to load CDI Spec"), path) + return nil + } + + vendor := spec.GetVendor() + specs[vendor] = append(specs[vendor], spec) + + for _, dev := range spec.devices { + qualified := dev.GetQualifiedName() + other, ok := devices[qualified] + if ok { + if resolveConflict(qualified, dev, other) { + continue + } + } + devices[qualified] = dev + } + + return nil + }) + + for conflict := range conflicts { + delete(devices, conflict) + } + + c.specs = specs + c.devices = devices + c.errors = specErrors + + return multierror.New(result...) +} + +// refreshIfRequired triggers a refresh if necessary. +func (c *Cache) refreshIfRequired(force bool) (bool, error) { + // We need to refresh if + // - it's forced by an explicit call to Refresh() in manual mode + // - a missing Spec dir appears (added to watch) in auto-refresh mode + if force || (c.autoRefresh && c.watch.update(c.dirErrors)) { + return true, c.refresh() + } + return false, nil +} + +// InjectDevices injects the given qualified devices to an OCI Spec. It +// returns any unresolvable devices and an error if injection fails for +// any of the devices. 
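+// +// Example (editor's illustrative sketch, not part of the upstream file): +// +// unresolved, err := cache.InjectDevices(ociSpec, "vendor.com/device=dev0") +// // unresolved lists any requested devices unknown to the cache; err is non-nil on failure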
+func (c *Cache) InjectDevices(ociSpec *oci.Spec, devices ...string) ([]string, error) { + var unresolved []string + + if ociSpec == nil { + return devices, errors.Errorf("can't inject devices, nil OCI Spec") + } + + c.Lock() + defer c.Unlock() + + c.refreshIfRequired(false) + + edits := &ContainerEdits{} + specs := map[*Spec]struct{}{} + + for _, device := range devices { + d := c.devices[device] + if d == nil { + unresolved = append(unresolved, device) + continue + } + if _, ok := specs[d.GetSpec()]; !ok { + specs[d.GetSpec()] = struct{}{} + edits.Append(d.GetSpec().edits()) + } + edits.Append(d.edits()) + } + + if unresolved != nil { + return unresolved, errors.Errorf("unresolvable CDI devices %s", + strings.Join(devices, ", ")) + } + + if err := edits.Apply(ociSpec); err != nil { + return nil, errors.Wrap(err, "failed to inject devices") + } + + return nil, nil +} + +// highestPrioritySpecDir returns the Spec directory with highest priority +// and its priority. +func (c *Cache) highestPrioritySpecDir() (string, int) { + if len(c.specDirs) == 0 { + return "", -1 + } + + prio := len(c.specDirs) - 1 + dir := c.specDirs[prio] + + return dir, prio +} + +// WriteSpec writes a Spec file with the given content into the highest +// priority Spec directory. If name has a "json" or "yaml" extension it +// chooses the encoding. Otherwise the default YAML encoding is used. +func (c *Cache) WriteSpec(raw *cdi.Spec, name string) error { + var ( + specDir string + path string + prio int + spec *Spec + err error + ) + + specDir, prio = c.highestPrioritySpecDir() + if specDir == "" { + return errors.New("no Spec directories to write to") + } + + path = filepath.Join(specDir, name) + if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" { + path += defaultSpecExt + } + + spec, err = newSpec(raw, path, prio) + if err != nil { + return err + } + + return spec.write(true) +} + +// RemoveSpec removes a Spec with the given name from the highest +// priority Spec directory. This function can be used to remove a +// Spec previously written by WriteSpec(). If the file exists and +// its removal fails RemoveSpec returns an error. +func (c *Cache) RemoveSpec(name string) error { + var ( + specDir string + path string + err error + ) + + specDir, _ = c.highestPrioritySpecDir() + if specDir == "" { + return errors.New("no Spec directories to remove from") + } + + path = filepath.Join(specDir, name) + if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" { + path += defaultSpecExt + } + + err = os.Remove(path) + if err != nil && stderr.Is(err, fs.ErrNotExist) { + err = nil + } + + return err +} + +// GetDevice returns the cached device for the given qualified name. +func (c *Cache) GetDevice(device string) *Device { + c.Lock() + defer c.Unlock() + + c.refreshIfRequired(false) + + return c.devices[device] +} + +// ListDevices lists all cached devices by qualified name. +func (c *Cache) ListDevices() []string { + var devices []string + + c.Lock() + defer c.Unlock() + + c.refreshIfRequired(false) + + for name := range c.devices { + devices = append(devices, name) + } + sort.Strings(devices) + + return devices +} + +// ListVendors lists all vendors known to the cache. +func (c *Cache) ListVendors() []string { + var vendors []string + + c.Lock() + defer c.Unlock() + + c.refreshIfRequired(false) + + for vendor := range c.specs { + vendors = append(vendors, vendor) + } + sort.Strings(vendors) + + return vendors +} + +// ListClasses lists all device classes known to the cache. 
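+// For example (editor's note), with a cached device such as "vendor.com/gpu=0", +// ListClasses would include "gpu", the class part of the device kind.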
+func (c *Cache) ListClasses() []string { + var ( + cmap = map[string]struct{}{} + classes []string + ) + + c.Lock() + defer c.Unlock() + + c.refreshIfRequired(false) + + for _, specs := range c.specs { + for _, spec := range specs { + cmap[spec.GetClass()] = struct{}{} + } + } + for class := range cmap { + classes = append(classes, class) + } + sort.Strings(classes) + + return classes +} + +// GetVendorSpecs returns all specs for the given vendor. +func (c *Cache) GetVendorSpecs(vendor string) []*Spec { + c.Lock() + defer c.Unlock() + + c.refreshIfRequired(false) + + return c.specs[vendor] +} + +// GetSpecErrors returns all errors encountered for the spec during the +// last cache refresh. +func (c *Cache) GetSpecErrors(spec *Spec) []error { + return c.errors[spec.GetPath()] +} + +// GetErrors returns all errors encountered during the last +// cache refresh. +func (c *Cache) GetErrors() map[string][]error { + c.Lock() + defer c.Unlock() + + errors := map[string][]error{} + for path, errs := range c.errors { + errors[path] = errs + } + for path, err := range c.dirErrors { + errors[path] = []error{err} + } + + return errors +} + +// GetSpecDirectories returns the CDI Spec directories currently in use. +func (c *Cache) GetSpecDirectories() []string { + c.Lock() + defer c.Unlock() + + dirs := make([]string, len(c.specDirs)) + copy(dirs, c.specDirs) + return dirs +} + +// GetSpecDirErrors returns any errors related to configured Spec directories. +func (c *Cache) GetSpecDirErrors() map[string]error { + if c.dirErrors == nil { + return nil + } + + c.Lock() + defer c.Unlock() + + errors := make(map[string]error) + for dir, err := range c.dirErrors { + errors[dir] = err + } + return errors +} + +// Our fsnotify helper wrapper. +type watch struct { + watcher *fsnotify.Watcher + tracked map[string]bool +} + +// Setup monitoring for the given Spec directories. +func (w *watch) setup(dirs []string, dirErrors map[string]error) { + var ( + dir string + err error + ) + w.tracked = make(map[string]bool) + for _, dir = range dirs { + w.tracked[dir] = false + } + + w.watcher, err = fsnotify.NewWatcher() + if err != nil { + for _, dir := range dirs { + dirErrors[dir] = errors.Wrap(err, "failed to create watcher") + } + return + } + + w.update(dirErrors) +} + +// Start watching Spec directories for relevant changes. +func (w *watch) start(m *sync.Mutex, refresh func() error, dirErrors map[string]error) { + go w.watch(w.watcher, m, refresh, dirErrors) +} + +// Stop watching directories. +func (w *watch) stop() { + if w.watcher == nil { + return + } + + w.watcher.Close() + w.tracked = nil +} + +// Watch Spec directory changes, triggering a refresh if necessary. +func (w *watch) watch(fsw *fsnotify.Watcher, m *sync.Mutex, refresh func() error, dirErrors map[string]error) { + watch := fsw + if watch == nil { + return + } + for { + select { + case event, ok := <-watch.Events: + if !ok { + return + } + + if (event.Op & (fsnotify.Rename | fsnotify.Remove | fsnotify.Write)) == 0 { + continue + } + if event.Op == fsnotify.Write { + if ext := filepath.Ext(event.Name); ext != ".json" && ext != ".yaml" { + continue + } + } + + m.Lock() + if event.Op == fsnotify.Remove && w.tracked[event.Name] { + w.update(dirErrors, event.Name) + } else { + w.update(dirErrors) + } + refresh() + m.Unlock() + + case _, ok := <-watch.Errors: + if !ok { + return + } + } + } +} + +// Update watch with pending/missing or removed directories. 
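+// It returns true if the watched set changed, i.e. a directory was newly +// added to the watch or reported removed, signalling that a refresh is +// needed. (Editor's summary of the behavior implemented below.)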
+func (w *watch) update(dirErrors map[string]error, removed ...string) bool { + var ( + dir string + ok bool + err error + update bool + ) + + for dir, ok = range w.tracked { + if ok { + continue + } + + err = w.watcher.Add(dir) + if err == nil { + w.tracked[dir] = true + delete(dirErrors, dir) + update = true + } else { + w.tracked[dir] = false + dirErrors[dir] = errors.Wrap(err, "failed to monitor for changes") + } + } + + for _, dir = range removed { + w.tracked[dir] = false + dirErrors[dir] = errors.New("directory removed") + update = true + } + + return update +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_unix.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_unix.go new file mode 100644 index 00000000000..0ee5fb86f56 --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_unix.go @@ -0,0 +1,26 @@ +//go:build !windows +// +build !windows + +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import "syscall" + +func osSync() { + syscall.Sync() +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_windows.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_windows.go new file mode 100644 index 00000000000..c6dabf5fa84 --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_windows.go @@ -0,0 +1,22 @@ +//go:build windows +// +build windows + +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +func osSync() {} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits.go new file mode 100644 index 00000000000..9fcecf8497b --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits.go @@ -0,0 +1,331 @@ +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + "os" + "path/filepath" + "sort" + "strings" + + "github.com/pkg/errors" + + "github.com/container-orchestrated-devices/container-device-interface/specs-go" + oci "github.com/opencontainers/runtime-spec/specs-go" + ocigen "github.com/opencontainers/runtime-tools/generate" +) + +const ( + // PrestartHook is the name of the OCI "prestart" hook. + PrestartHook = "prestart" + // CreateRuntimeHook is the name of the OCI "createRuntime" hook. + CreateRuntimeHook = "createRuntime" + // CreateContainerHook is the name of the OCI "createContainer" hook. + CreateContainerHook = "createContainer" + // StartContainerHook is the name of the OCI "startContainer" hook. + StartContainerHook = "startContainer" + // PoststartHook is the name of the OCI "poststart" hook. + PoststartHook = "poststart" + // PoststopHook is the name of the OCI "poststop" hook. + PoststopHook = "poststop" +) + +var ( + // Names of recognized hooks. + validHookNames = map[string]struct{}{ + PrestartHook: {}, + CreateRuntimeHook: {}, + CreateContainerHook: {}, + StartContainerHook: {}, + PoststartHook: {}, + PoststopHook: {}, + } +) + +// ContainerEdits represents updates to be applied to an OCI Spec. +// These updates can be specific to a CDI device, or they can be +// specific to a CDI Spec. In the former case these edits should +// be applied to all OCI Specs where the corresponding CDI device +// is injected. In the latter case, these edits should be applied +// to all OCI Specs where at least one device from the CDI Spec +// is injected. +type ContainerEdits struct { + *specs.ContainerEdits +} + +// Apply edits to the given OCI Spec. Updates the OCI Spec in place. +// Returns an error if the update fails. +func (e *ContainerEdits) Apply(spec *oci.Spec) error { + if spec == nil { + return errors.New("can't edit nil OCI Spec") + } + if e == nil || e.ContainerEdits == nil { + return nil + } + + specgen := ocigen.NewFromSpec(spec) + if len(e.Env) > 0 { + specgen.AddMultipleProcessEnv(e.Env) + } + + for _, d := range e.DeviceNodes { + dn := DeviceNode{d} + + err := dn.fillMissingInfo() + if err != nil { + return err + } + dev := d.ToOCI() + if dev.UID == nil && spec.Process != nil { + if uid := spec.Process.User.UID; uid > 0 { + dev.UID = &uid + } + } + if dev.GID == nil && spec.Process != nil { + if gid := spec.Process.User.GID; gid > 0 { + dev.GID = &gid + } + } + + specgen.RemoveDevice(dev.Path) + specgen.AddDevice(dev) + + if dev.Type == "b" || dev.Type == "c" { + access := d.Permissions + if access == "" { + access = "rwm" + } + specgen.AddLinuxResourcesDevice(true, dev.Type, &dev.Major, &dev.Minor, access) + } + } + + if len(e.Mounts) > 0 { + for _, m := range e.Mounts { + specgen.RemoveMount(m.ContainerPath) + specgen.AddMount(m.ToOCI()) + } + sortMounts(&specgen) + } + + for _, h := range e.Hooks { + switch h.HookName { + case PrestartHook: + specgen.AddPreStartHook(h.ToOCI()) + case PoststartHook: + specgen.AddPostStartHook(h.ToOCI()) + case PoststopHook: + specgen.AddPostStopHook(h.ToOCI()) + // TODO: Maybe runtime-tools/generate should be updated with these... 
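+ // (Editor's note: ocigen has no helpers for the hooks below, so they are + // appended directly to the OCI Spec's hook lists after ensureOCIHooks + // guarantees spec.Hooks is non-nil.)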
+ case CreateRuntimeHook: + ensureOCIHooks(spec) + spec.Hooks.CreateRuntime = append(spec.Hooks.CreateRuntime, h.ToOCI()) + case CreateContainerHook: + ensureOCIHooks(spec) + spec.Hooks.CreateContainer = append(spec.Hooks.CreateContainer, h.ToOCI()) + case StartContainerHook: + ensureOCIHooks(spec) + spec.Hooks.StartContainer = append(spec.Hooks.StartContainer, h.ToOCI()) + default: + return errors.Errorf("unknown hook name %q", h.HookName) + } + } + + return nil +} + +// Validate container edits. +func (e *ContainerEdits) Validate() error { + if e == nil || e.ContainerEdits == nil { + return nil + } + + if err := ValidateEnv(e.Env); err != nil { + return errors.Wrap(err, "invalid container edits") + } + for _, d := range e.DeviceNodes { + if err := (&DeviceNode{d}).Validate(); err != nil { + return err + } + } + for _, h := range e.Hooks { + if err := (&Hook{h}).Validate(); err != nil { + return err + } + } + for _, m := range e.Mounts { + if err := (&Mount{m}).Validate(); err != nil { + return err + } + } + + return nil +} + +// Append other edits into this one. If called with a nil receiver, +// allocates and returns newly allocated edits. +func (e *ContainerEdits) Append(o *ContainerEdits) *ContainerEdits { + if o == nil || o.ContainerEdits == nil { + return e + } + if e == nil { + e = &ContainerEdits{} + } + if e.ContainerEdits == nil { + e.ContainerEdits = &specs.ContainerEdits{} + } + + e.Env = append(e.Env, o.Env...) + e.DeviceNodes = append(e.DeviceNodes, o.DeviceNodes...) + e.Hooks = append(e.Hooks, o.Hooks...) + e.Mounts = append(e.Mounts, o.Mounts...) + + return e +} + +// isEmpty returns true if these edits are empty. This is valid in a +// global Spec context but invalid in a Device context. +func (e *ContainerEdits) isEmpty() bool { + if e == nil { + return false + } + return len(e.Env)+len(e.DeviceNodes)+len(e.Hooks)+len(e.Mounts) == 0 +} + +// ValidateEnv validates the given environment variables. +func ValidateEnv(env []string) error { + for _, v := range env { + if strings.IndexByte(v, byte('=')) <= 0 { + return errors.Errorf("invalid environment variable %q", v) + } + } + return nil +} + +// DeviceNode is a CDI Spec DeviceNode wrapper, used for validating DeviceNodes. +type DeviceNode struct { + *specs.DeviceNode +} + +// Validate a CDI Spec DeviceNode. +func (d *DeviceNode) Validate() error { + validTypes := map[string]struct{}{ + "": {}, + "b": {}, + "c": {}, + "u": {}, + "p": {}, + } + + if d.Path == "" { + return errors.New("invalid (empty) device path") + } + if _, ok := validTypes[d.Type]; !ok { + return errors.Errorf("device %q: invalid type %q", d.Path, d.Type) + } + for _, bit := range d.Permissions { + if bit != 'r' && bit != 'w' && bit != 'm' { + return errors.Errorf("device %q: invalid permissions %q", + d.Path, d.Permissions) + } + } + return nil +} + +// Hook is a CDI Spec Hook wrapper, used for validating hooks. +type Hook struct { + *specs.Hook +} + +// Validate a hook. +func (h *Hook) Validate() error { + if _, ok := validHookNames[h.HookName]; !ok { + return errors.Errorf("invalid hook name %q", h.HookName) + } + if h.Path == "" { + return errors.Errorf("invalid hook %q with empty path", h.HookName) + } + if err := ValidateEnv(h.Env); err != nil { + return errors.Wrapf(err, "invalid hook %q", h.HookName) + } + return nil +} + +// Mount is a CDI Mount wrapper, used for validating mounts. +type Mount struct { + *specs.Mount +} + +// Validate a mount.
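+// Both the host path and the container path must be non-empty.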
+func (m *Mount) Validate() error { + if m.HostPath == "" { + return errors.New("invalid mount, empty host path") + } + if m.ContainerPath == "" { + return errors.New("invalid mount, empty container path") + } + return nil +} + +// Ensure OCI Spec hooks are not nil so we can add hooks. +func ensureOCIHooks(spec *oci.Spec) { + if spec.Hooks == nil { + spec.Hooks = &oci.Hooks{} + } +} + +// sortMounts sorts the mounts in the given OCI Spec. +func sortMounts(specgen *ocigen.Generator) { + mounts := specgen.Mounts() + specgen.ClearMounts() + sort.Sort(orderedMounts(mounts)) + specgen.Config.Mounts = mounts +} + +// orderedMounts defines how to sort an OCI Spec Mount slice. +// This is almost the same implementation as used by CRI-O and Docker, +// with a minor tweak for stable sorting order (easier to test): +// https://github.com/moby/moby/blob/17.05.x/daemon/volumes.go#L26 +type orderedMounts []oci.Mount + +// Len returns the number of mounts. Used in sorting. +func (m orderedMounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m orderedMounts) Less(i, j int) bool { + ip, jp := m.parts(i), m.parts(j) + if ip < jp { + return true + } + if jp < ip { + return false + } + return m[i].Destination < m[j].Destination +} + +// Swap swaps two items in an array of mounts. Used in sorting. +func (m orderedMounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. +func (m orderedMounts) parts(i int) int { + return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_unix.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_unix.go new file mode 100644 index 00000000000..5d7ebcb557c --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_unix.go @@ -0,0 +1,56 @@ +//go:build !windows +// +build !windows + +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + runc "github.com/opencontainers/runc/libcontainer/devices" + "github.com/pkg/errors" +) + +// fillMissingInfo fills in missing mandatory attributes from the host device.
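+// A missing host path defaults to the container path; a missing type and +// missing major/minor numbers are filled in by stat()ing the host device +// node.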
+func (d *DeviceNode) fillMissingInfo() error { + if d.HostPath == "" { + d.HostPath = d.Path + } + + if d.Type != "" && (d.Major != 0 || d.Type == "p") { + return nil + } + + hostDev, err := runc.DeviceFromPath(d.HostPath, "rwm") + if err != nil { + return errors.Wrapf(err, "failed to stat CDI host device %q", d.HostPath) + } + + if d.Type == "" { + d.Type = string(hostDev.Type) + } else { + if d.Type != string(hostDev.Type) { + return errors.Errorf("CDI device (%q, %q), host type mismatch (%s, %s)", + d.Path, d.HostPath, d.Type, string(hostDev.Type)) + } + } + if d.Major == 0 && d.Type != "p" { + d.Major = hostDev.Major + d.Minor = hostDev.Minor + } + + return nil +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_windows.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_windows.go new file mode 100644 index 00000000000..fd91afa926c --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_windows.go @@ -0,0 +1,27 @@ +//go:build windows +// +build windows + +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import "fmt" + +// fillMissingInfo fills in missing mandatory attributes from the host device. +func (d *DeviceNode) fillMissingInfo() error { + return fmt.Errorf("unimplemented") +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/device.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/device.go new file mode 100644 index 00000000000..0bb1f531bd3 --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/device.go @@ -0,0 +1,78 @@ +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" + oci "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// Device represents a CDI device of a Spec. +type Device struct { + *cdi.Device + spec *Spec +} + +// Create a new Device, associate it with the given Spec. +func newDevice(spec *Spec, d cdi.Device) (*Device, error) { + dev := &Device{ + Device: &d, + spec: spec, + } + + if err := dev.validate(); err != nil { + return nil, err + } + + return dev, nil +} + +// GetSpec returns the Spec this device is defined in. 
+func (d *Device) GetSpec() *Spec { + return d.spec +} + +// GetQualifiedName returns the qualified name for this device. +func (d *Device) GetQualifiedName() string { + return QualifiedName(d.spec.GetVendor(), d.spec.GetClass(), d.Name) +} + +// ApplyEdits applies the device-specific container edits to an OCI Spec. +func (d *Device) ApplyEdits(ociSpec *oci.Spec) error { + return d.edits().Apply(ociSpec) +} + +// edits returns the applicable container edits for this spec. +func (d *Device) edits() *ContainerEdits { + return &ContainerEdits{&d.ContainerEdits} +} + +// Validate the device. +func (d *Device) validate() error { + if err := ValidateDeviceName(d.Name); err != nil { + return err + } + edits := d.edits() + if edits.isEmpty() { + return errors.Errorf("invalid device, empty device edits") + } + if err := edits.Validate(); err != nil { + return errors.Wrapf(err, "invalid device %q", d.Name) + } + return nil +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go new file mode 100644 index 00000000000..96461229064 --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go @@ -0,0 +1,274 @@ +// Package cdi has the primary purpose of providing an API for +// interacting with CDI and consuming CDI devices. +// +// For more information about Container Device Interface, please refer to +// https://github.com/container-orchestrated-devices/container-device-interface +// +// Container Device Interface +// +// Container Device Interface, or CDI for short, provides comprehensive +// third party device support for container runtimes. CDI uses vendor +// provided specification files, CDI Specs for short, to describe how a +// container's runtime environment should be modified when one or more +// of the vendor-specific devices is injected into the container. Beyond +// describing the low level platform-specific details of how to gain +// basic access to a device, CDI Specs allow more fine-grained device +// initialization, and the automatic injection of any necessary vendor- +// or device-specific software that might be required for a container +// to use a device or take full advantage of it. +// +// In the CDI device model containers request access to a device using +// fully qualified device names, qualified names for short, consisting of +// a vendor identifier, a device class and a device name or identifier. +// These pieces of information together uniquely identify a device among +// all device vendors, classes and device instances. +// +// This package implements an API for easy consumption of CDI. The API +// implements discovery, loading and caching of CDI Specs and injection +// of CDI devices into containers. This is the most common functionality +// the vast majority of CDI consumers need. The API should be usable both +// by OCI runtime clients and runtime implementations. +// +// CDI Registry +// +// The primary interface to interact with CDI devices is the Registry. It +// is essentially a cache of all Specs and devices discovered in standard +// CDI directories on the host. The registry has two main functions: +// injecting devices into an OCI Spec and refreshing the cache of CDI +// Specs and devices.
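+// +// For example, a minimal sketch of querying the registry (illustrative +// only, and assuming the default Spec directories are in use) could look +// like this: +// +// registry := cdi.GetRegistry() +// for _, vendor := range registry.SpecDB().ListVendors() { +// fmt.Println("CDI vendor:", vendor) +// } +// for _, device := range registry.DeviceDB().ListDevices() { +// fmt.Println("CDI device:", device) +// }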
+// +// Device Injection +// +// Using the Registry one can inject CDI devices into a container with code +// similar to the following snippet: +// +// import ( +// "fmt" +// "strings" +// +// "github.com/pkg/errors" +// log "github.com/sirupsen/logrus" +// +// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" +// oci "github.com/opencontainers/runtime-spec/specs-go" +// ) +// +// func injectCDIDevices(spec *oci.Spec, devices []string) error { +// log.Debugf("pristine OCI Spec: %s", dumpSpec(spec)) +// +// _, err := cdi.GetRegistry().InjectDevices(spec, devices) +// if err != nil { +// return errors.Wrap(err, "CDI device injection failed") +// } +// +// log.Debugf("CDI-updated OCI Spec: %s", dumpSpec(spec)) +// return nil +// } +// +// Cache Refresh +// +// By default the CDI Spec cache monitors the configured Spec directories +// and automatically refreshes itself when necessary. This behavior can be +// disabled using the WithAutoRefresh(false) option. +// +// Failure to set up monitoring for a Spec directory causes the directory to +// get ignored and an error to be recorded among the Spec directory errors. +// These errors can be queried using the GetSpecDirErrors() function. If the +// error condition is transient, for instance a missing directory which later +// gets created, the corresponding error will be removed once the condition +// is over. +// +// With auto-refresh enabled, CDI devices can be injected without an +// explicit call to Refresh(). +// +// In a runtime implementation one typically wants to make sure the +// CDI Spec cache is up to date before performing device injection. +// A code snippet similar to the following accomplishes that: +// +// import ( +// "fmt" +// "strings" +// +// "github.com/pkg/errors" +// log "github.com/sirupsen/logrus" +// +// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" +// oci "github.com/opencontainers/runtime-spec/specs-go" +// ) +// +// func injectCDIDevices(spec *oci.Spec, devices []string) error { +// registry := cdi.GetRegistry() +// +// if err := registry.Refresh(); err != nil { +// // Note: +// // It is up to the implementation to decide whether +// // to abort injection on errors. A failed Refresh() +// // does not necessarily render the registry unusable. +// // For instance, a parse error in a Spec file for +// // vendor A does not have any effect on devices of +// // vendor B... +// log.Warnf("pre-injection Refresh() failed: %v", err) +// } +// +// log.Debugf("pristine OCI Spec: %s", dumpSpec(spec)) +// +// _, err := registry.InjectDevices(spec, devices) +// if err != nil { +// return errors.Wrap(err, "CDI device injection failed") +// } +// +// log.Debugf("CDI-updated OCI Spec: %s", dumpSpec(spec)) +// return nil +// } +// +// Generated Spec Files, Multiple Directories, Device Precedence +// +// It is often necessary to generate Spec files dynamically. On some +// systems the available or usable set of CDI devices might change +// dynamically which then needs to be reflected in CDI Specs. For +// some device classes it makes sense to enumerate the available +// devices at every boot and generate Spec file entries for each +// device found. Some CDI devices might need special client- or +// request-specific configuration which can only be fulfilled by +// dynamically generated client-specific entries in transient Spec +// files. +// +// CDI can collect Spec files from multiple directories.
Spec files are +// automatically assigned priorities according to which directory they +// were loaded from. The later a directory occurs in the list of CDI +// directories to scan, the higher the priority assigned to Spec files +// loaded from that directory. When two or more Spec files define the +// same device, the conflict is resolved by choosing the definition from +// the Spec file with the highest priority. +// +// The default CDI directory configuration is chosen to encourage +// separating dynamically generated CDI Spec files from static ones. +// The default directories are '/etc/cdi' and '/var/run/cdi'. By putting +// dynamically generated Spec files under '/var/run/cdi', those take +// precedence over static ones in '/etc/cdi'. With this scheme, static +// Spec files, typically installed by distro-specific packages, go into +// '/etc/cdi' while all the dynamically generated Spec files, transient +// or other, go into '/var/run/cdi'. +// +// Spec File Generation +// +// CDI offers two functions for writing and removing dynamically generated +// Specs from CDI Spec directories. These functions, WriteSpec() and +// RemoveSpec(), implicitly follow the principle of separating dynamic Specs +// from the rest and therefore always write to and remove Specs from the +// last configured directory. +// +// Corresponding functions are also provided for generating names for Spec +// files. These functions follow a simple naming convention to ensure that +// multiple entities generating Spec files simultaneously on the same host +// do not end up using conflicting Spec file names. GenerateSpecName(), +// GenerateNameForSpec(), GenerateTransientSpecName(), and +// GenerateNameForTransientSpec() all generate names which can be passed +// as such to WriteSpec() and subsequently to RemoveSpec(). +// +// Generating a Spec file for a vendor/device class can be done with a +// code snippet similar to the following: +// +// import ( +// "fmt" +// ... +// "github.com/container-orchestrated-devices/container-device-interface/specs-go" +// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" +// ) +// +// func generateDeviceSpecs() error { +// registry := cdi.GetRegistry() +// spec := &specs.Spec{ +// Version: specs.CurrentVersion, +// Kind: vendor+"/"+class, +// } +// +// for _, dev := range enumerateDevices() { +// spec.Devices = append(spec.Devices, specs.Device{ +// Name: dev.Name, +// ContainerEdits: getContainerEditsForDevice(dev), +// }) +// } +// +// specName, err := cdi.GenerateNameForSpec(spec) +// if err != nil { +// return fmt.Errorf("failed to generate Spec name: %w", err) +// } +// +// return registry.SpecDB().WriteSpec(spec, specName) +// } +// +// Similarly, generating and later cleaning up transient Spec files can be +// done with code fragments similar to the following. These transient Spec +// files are temporary Spec files with container-specific parametrization. +// They are typically created before the associated container is created +// and removed once that container is removed. +// +// import ( +// "fmt" +// ...
+// "github.com/container-orchestrated-devices/container-device-interface/specs-go" +// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" +// ) +// +// func generateTransientSpec(ctr Container) error { +// registry := cdi.GetRegistry() +// devices := getContainerDevs(ctr, vendor, class) +// spec := &specs.Spec{ +// Version: specs.CurrentVersion, +// Kind: vendor+"/"+class, +// } +// +// for _, dev := range devices { +// spec.Devices = append(spec.Devices, specs.Device{ +// // the generated name needs to be unique within the +// // vendor/class domain on the host/node. +// Name: generateUniqueDevName(dev, ctr), +// ContainerEdits: getEditsForContainer(dev), +// }) +// } +// +// // transientID is expected to guarantee that the Spec file name +// // generated using is unique within +// // the host/node. If more than one device is allocated with the +// // same vendor/class domain, either all generated Spec entries +// // should go to a single Spec file (like in this sample snippet), +// // or transientID should be unique for each generated Spec file. +// transientID := getSomeSufficientlyUniqueIDForContainer(ctr) +// specName, err := cdi.GenerateNameForTransientSpec(vendor, class, transientID) +// if err != nil { +// return fmt.Errorf("failed to generate Spec name: %w", err) +// } +// +// return registry.WriteSpec(spec, specName) +// } +// +// func removeTransientSpec(ctr Container) error { +// registry := cdi.GetRegistry() +// transientID := getSomeSufficientlyUniqueIDForContainer(ctr) +// specName := cdi.GenerateNameForTransientSpec(vendor, class, transientID) +// +// return registry.RemoveSpec(specName) +// } +// +// CDI Spec Validation +// +// This package performs both syntactic and semantic validation of CDI +// Spec file data when a Spec file is loaded via the registry or using +// the ReadSpec API function. As part of the semantic verification, the +// Spec file is verified against the CDI Spec JSON validation schema. +// +// If a valid externally provided JSON validation schema is found in +// the filesystem at /etc/cdi/schema/schema.json it is loaded and used +// as the default validation schema. If such a file is not found or +// fails to load, an embedded no-op schema is used. +// +// The used validation schema can also be changed programmatically using +// the SetSchema API convenience function. This function also accepts +// the special "builtin" (BuiltinSchemaName) and "none" (NoneSchemaName) +// schema names which switch the used schema to the in-repo validation +// schema embedded into the binary or the now default no-op schema +// correspondingly. Other names are interpreted as the path to the actual +// validation schema to load and use. +package cdi diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/qualified-device.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/qualified-device.go new file mode 100644 index 00000000000..ccfab7094cd --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/qualified-device.go @@ -0,0 +1,206 @@ +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + "strings" + + "github.com/pkg/errors" +) + +// QualifiedName returns the qualified name for a device. +// The syntax for a qualified device name is +// "<vendor>/<class>=<name>". +// For example: "vendor.example/class=device0". +// A valid vendor name may contain the following runes: +// 'A'-'Z', 'a'-'z', '0'-'9', '.', '-', '_'. +// A valid class name may contain the following runes: +// 'A'-'Z', 'a'-'z', '0'-'9', '-', '_'. +// A valid device name may contain the following runes: +// 'A'-'Z', 'a'-'z', '0'-'9', '-', '_', '.', ':' +func QualifiedName(vendor, class, name string) string { + return vendor + "/" + class + "=" + name +} + +// IsQualifiedName tests if a device name is qualified. +func IsQualifiedName(device string) bool { + _, _, _, err := ParseQualifiedName(device) + return err == nil +} + +// ParseQualifiedName splits a qualified name into device vendor, class, +// and name. If the device fails to parse as a qualified name, or if any +// of the split components fail to pass syntax validation, vendor and +// class are returned as empty, together with the verbatim input as the +// name and an error describing the reason for failure. +func ParseQualifiedName(device string) (string, string, string, error) { + vendor, class, name := ParseDevice(device) + + if vendor == "" { + return "", "", device, errors.Errorf("unqualified device %q, missing vendor", device) + } + if class == "" { + return "", "", device, errors.Errorf("unqualified device %q, missing class", device) + } + if name == "" { + return "", "", device, errors.Errorf("unqualified device %q, missing device name", device) + } + + if err := ValidateVendorName(vendor); err != nil { + return "", "", device, errors.Wrapf(err, "invalid device %q", device) + } + if err := ValidateClassName(class); err != nil { + return "", "", device, errors.Wrapf(err, "invalid device %q", device) + } + if err := ValidateDeviceName(name); err != nil { + return "", "", device, errors.Wrapf(err, "invalid device %q", device) + } + + return vendor, class, name, nil +} + +// ParseDevice tries to split a device name into vendor, class, and name. +// If this fails, for instance in the case of unqualified device names, +// ParseDevice returns an empty vendor and class together with name set +// to the verbatim input. +func ParseDevice(device string) (string, string, string) { + if device == "" || device[0] == '/' { + return "", "", device + } + + parts := strings.SplitN(device, "=", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", device + } + + name := parts[1] + vendor, class := ParseQualifier(parts[0]) + if vendor == "" { + return "", "", device + } + + return vendor, class, name +} + +// ParseQualifier splits a device qualifier into vendor and class. +// The syntax for a device qualifier is +// "<vendor>/<class>". +// If parsing fails, an empty vendor and the class set to the +// verbatim input are returned. +func ParseQualifier(kind string) (string, string) { + parts := strings.SplitN(kind, "/", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", kind + } + return parts[0], parts[1] +} + +// ValidateVendorName checks the validity of a vendor name.
+// A vendor name may contain the following ASCII characters: +// - upper- and lowercase letters ('A'-'Z', 'a'-'z') +// - digits ('0'-'9') +// - underscore, dash, and dot ('_', '-', and '.') +func ValidateVendorName(vendor string) error { + if vendor == "" { + return errors.Errorf("invalid (empty) vendor name") + } + if !isLetter(rune(vendor[0])) { + return errors.Errorf("invalid vendor %q, should start with letter", vendor) + } + for _, c := range string(vendor[1 : len(vendor)-1]) { + switch { + case isAlphaNumeric(c): + case c == '_' || c == '-' || c == '.': + default: + return errors.Errorf("invalid character '%c' in vendor name %q", + c, vendor) + } + } + if !isAlphaNumeric(rune(vendor[len(vendor)-1])) { + return errors.Errorf("invalid vendor %q, should end with a letter or digit", vendor) + } + + return nil +} + +// ValidateClassName checks the validity of a class name. +// A class name may contain the following ASCII characters: +// - upper- and lowercase letters ('A'-'Z', 'a'-'z') +// - digits ('0'-'9') +// - underscore and dash ('_', '-') +func ValidateClassName(class string) error { + if class == "" { + return errors.Errorf("invalid (empty) device class") + } + if !isLetter(rune(class[0])) { + return errors.Errorf("invalid class %q, should start with letter", class) + } + for _, c := range string(class[1 : len(class)-1]) { + switch { + case isAlphaNumeric(c): + case c == '_' || c == '-': + default: + return errors.Errorf("invalid character '%c' in device class %q", + c, class) + } + } + if !isAlphaNumeric(rune(class[len(class)-1])) { + return errors.Errorf("invalid class %q, should end with a letter or digit", class) + } + return nil +} + +// ValidateDeviceName checks the validity of a device name. +// A device name may contain the following ASCII characters: +// - upper- and lowercase letters ('A'-'Z', 'a'-'z') +// - digits ('0'-'9') +// - underscore, dash, dot, colon ('_', '-', '.', ':') +func ValidateDeviceName(name string) error { + if name == "" { + return errors.Errorf("invalid (empty) device name") + } + if !isAlphaNumeric(rune(name[0])) { + return errors.Errorf("invalid name %q, should start with a letter or digit", name) + } + if len(name) == 1 { + return nil + } + for _, c := range string(name[1 : len(name)-1]) { + switch { + case isAlphaNumeric(c): + case c == '_' || c == '-' || c == '.' || c == ':': + default: + return errors.Errorf("invalid character '%c' in device name %q", + c, name) + } + } + if !isAlphaNumeric(rune(name[len(name)-1])) { + return errors.Errorf("invalid name %q, should end with a letter or digit", name) + } + return nil +} + +func isLetter(c rune) bool { + return ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') +} + +func isDigit(c rune) bool { + return '0' <= c && c <= '9' +} + +func isAlphaNumeric(c rune) bool { + return isLetter(c) || isDigit(c) +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/registry.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/registry.go new file mode 100644 index 00000000000..10fab8997e0 --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/registry.go @@ -0,0 +1,152 @@ +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + "sync" + + cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" + oci "github.com/opencontainers/runtime-spec/specs-go" +) + +// +// Registry keeps a cache of all CDI Specs installed or generated on +// the host. Registry is the primary interface clients should use to +// interact with CDI. +// +// The most commonly used Registry functions are for refreshing the +// registry and injecting CDI devices into an OCI Spec. +// +type Registry interface { + RegistryResolver + RegistryRefresher + DeviceDB() RegistryDeviceDB + SpecDB() RegistrySpecDB +} + +// RegistryRefresher is the registry interface for refreshing the +// cache of CDI Specs and devices. +// +// Configure reconfigures the registry with the given options. +// +// Refresh rescans all CDI Spec directories and updates the +// state of the cache to reflect any changes. It returns any +// errors encountered during the refresh. +// +// GetErrors returns all errors encountered for any of the scanned +// Spec files during the last cache refresh. +// +// GetSpecDirectories returns the CDI Spec directories +// currently in use. The directories are returned in the scan +// order of Refresh(). +// +// GetSpecDirErrors returns any errors related to the configured +// Spec directories. +type RegistryRefresher interface { + Configure(...Option) error + Refresh() error + GetErrors() map[string][]error + GetSpecDirectories() []string + GetSpecDirErrors() map[string]error +} + +// RegistryResolver is the registry interface for injecting CDI +// devices into an OCI Spec. +// +// InjectDevices takes an OCI Spec and injects into it a set of +// CDI devices given by qualified name. It returns the names of +// any unresolved devices and an error if injection fails. +type RegistryResolver interface { + InjectDevices(spec *oci.Spec, device ...string) (unresolved []string, err error) +} + +// RegistryDeviceDB is the registry interface for querying devices. +// +// GetDevice returns the CDI device for the given qualified name. If +// the device is not found, GetDevice returns nil. +// +// ListDevices returns a slice with the names of all known qualified +// devices. The returned slice is sorted. +type RegistryDeviceDB interface { + GetDevice(device string) *Device + ListDevices() []string +} + +// RegistrySpecDB is the registry interface for querying CDI Specs. +// +// ListVendors returns a slice with all known vendors. The returned +// slice is sorted. +// +// ListClasses returns a slice with all known classes. The returned +// slice is sorted. +// +// GetVendorSpecs returns a slice of all Specs for the vendor. +// +// GetSpecErrors returns any errors for the Spec encountered during +// the last cache refresh. +// +// WriteSpec writes the Spec with the given content and name to the +// last Spec directory.
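+// +// RemoveSpec removes the Spec with the given name from the last +// Spec directory.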
+type RegistrySpecDB interface { + ListVendors() []string + ListClasses() []string + GetVendorSpecs(vendor string) []*Spec + GetSpecErrors(*Spec) []error + WriteSpec(raw *cdi.Spec, name string) error + RemoveSpec(name string) error +} + +type registry struct { + *Cache +} + +var _ Registry = &registry{} + +var ( + reg *registry + initOnce sync.Once +) + +// GetRegistry returns the CDI registry. If any options are given, those +// are applied to the registry. +func GetRegistry(options ...Option) Registry { + var new bool + initOnce.Do(func() { + reg, _ = getRegistry(options...) + new = true + }) + if !new && len(options) > 0 { + reg.Configure(options...) + reg.Refresh() + } + return reg +} + +// DeviceDB returns the registry interface for querying devices. +func (r *registry) DeviceDB() RegistryDeviceDB { + return r +} + +// SpecDB returns the registry interface for querying Specs. +func (r *registry) SpecDB() RegistrySpecDB { + return r +} + +func getRegistry(options ...Option) (*registry, error) { + c, err := NewCache(options...) + return &registry{c}, err +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec-dirs.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec-dirs.go new file mode 100644 index 00000000000..f339349bbac --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec-dirs.go @@ -0,0 +1,114 @@ +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + "errors" + "io/fs" + "os" + "path/filepath" +) + +const ( + // DefaultStaticDir is the default directory for static CDI Specs. + DefaultStaticDir = "/etc/cdi" + // DefaultDynamicDir is the default directory for generated CDI Specs. + DefaultDynamicDir = "/var/run/cdi" +) + +var ( + // DefaultSpecDirs is the default Spec directory configuration. + // While altering this variable changes the package defaults, + // the preferred way of overriding the default directories is + // to use the WithSpecDirs option. Otherwise the change is only + // effective if it takes place before creating the Registry or + // other Cache instances. + DefaultSpecDirs = []string{DefaultStaticDir, DefaultDynamicDir} + // ErrStopScan can be returned from a scanSpecFunc to stop the scan. + ErrStopScan = errors.New("stop Spec scan") +) + +// WithSpecDirs returns an option to override the CDI Spec directories. +func WithSpecDirs(dirs ...string) Option { + return func(c *Cache) error { + specDirs := make([]string, len(dirs)) + for i, dir := range dirs { + specDirs[i] = filepath.Clean(dir) + } + c.specDirs = specDirs + return nil + } +} + +// scanSpecFunc is a function for processing CDI Spec files. +type scanSpecFunc func(string, int, *Spec, error) error + +// scanSpecDirs scans the given directories looking for CDI Spec files, +// which are all files with a '.json' or '.yaml' suffix.
For every Spec +// file discovered, scanSpecDirs loads a Spec from the file then calls +// the scan function passing it the path to the file, the priority (the +// index of the directory in the slice of directories given), the Spec +// itself, and any error encountered while loading the Spec. +// +// Scanning stops once all files have been processed or when the scan +// function returns an error. The result of scanSpecDirs is the error +// returned by the scan function, if any. The special error ErrStopScan +// can be used to terminate the scan gracefully without scanSpecDirs +// returning an error. scanSpecDirs silently skips any subdirectories. +func scanSpecDirs(dirs []string, scanFn scanSpecFunc) error { + var ( + spec *Spec + err error + ) + + for priority, dir := range dirs { + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + // for initial stat failure Walk calls us with nil info + if info == nil { + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return err + } + // first call from Walk is for dir itself, others we skip + if info.IsDir() { + if path == dir { + return nil + } + return filepath.SkipDir + } + + // ignore obviously non-Spec files + if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" { + return nil + } + + if err != nil { + return scanFn(path, priority, nil, err) + } + + spec, err = ReadSpec(path, priority) + return scanFn(path, priority, spec, err) + }) + + if err != nil && err != ErrStopScan { + return err + } + } + + return nil +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go new file mode 100644 index 00000000000..c558b8efed8 --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go @@ -0,0 +1,350 @@ +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + + oci "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" +) + +const ( + // CurrentVersion is the current version of the CDI Spec. + CurrentVersion = cdi.CurrentVersion + + // defaultSpecExt is the file extension for the default encoding. + defaultSpecExt = ".yaml" +) + +var ( + // Valid CDI Spec versions. + validSpecVersions = map[string]struct{}{ + "0.1.0": {}, + "0.2.0": {}, + "0.3.0": {}, + "0.4.0": {}, + "0.5.0": {}, + } + + // Externally set CDI Spec validation function. + specValidator func(*cdi.Spec) error + validatorLock sync.RWMutex +) + +// Spec represents a single CDI Spec. It is usually loaded from a +// file and stored in a cache. The Spec has an associated priority.
+// This priority is inherited from the associated priority of the +// CDI Spec directory that contains the CDI Spec file and is used +// to resolve conflicts if multiple CDI Spec files contain entries +// for the same fully qualified device. +type Spec struct { + *cdi.Spec + vendor string + class string + path string + priority int + devices map[string]*Device +} + +// ReadSpec reads the given CDI Spec file. The resulting Spec is +// assigned the given priority. If reading or parsing the Spec +// data fails ReadSpec returns a nil Spec and an error. +func ReadSpec(path string, priority int) (*Spec, error) { + data, err := ioutil.ReadFile(path) + switch { + case os.IsNotExist(err): + return nil, err + case err != nil: + return nil, errors.Wrapf(err, "failed to read CDI Spec %q", path) + } + + raw, err := ParseSpec(data) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse CDI Spec %q", path) + } + if raw == nil { + return nil, errors.Errorf("failed to parse CDI Spec %q, no Spec data", path) + } + + spec, err := newSpec(raw, path, priority) + if err != nil { + return nil, err + } + + return spec, nil +} + +// newSpec creates a new Spec from the given CDI Spec data. The +// Spec is marked as loaded from the given path with the given +// priority. If Spec data validation fails newSpec returns a nil +// Spec and an error. +func newSpec(raw *cdi.Spec, path string, priority int) (*Spec, error) { + err := validateSpec(raw) + if err != nil { + return nil, err + } + + spec := &Spec{ + Spec: raw, + path: filepath.Clean(path), + priority: priority, + } + + if ext := filepath.Ext(spec.path); ext != ".yaml" && ext != ".json" { + spec.path += defaultSpecExt + } + + spec.vendor, spec.class = ParseQualifier(spec.Kind) + + if spec.devices, err = spec.validate(); err != nil { + return nil, errors.Wrap(err, "invalid CDI Spec") + } + + return spec, nil +} + +// Write the CDI Spec to the file associated with it during instantiation +// by newSpec() or ReadSpec(). +func (s *Spec) write(overwrite bool) error { + var ( + data []byte + dir string + tmp *os.File + err error + ) + + err = validateSpec(s.Spec) + if err != nil { + return err + } + + if filepath.Ext(s.path) == ".yaml" { + data, err = yaml.Marshal(s.Spec) + } else { + data, err = json.Marshal(s.Spec) + } + if err != nil { + return errors.Wrap(err, "failed to marshal Spec file") + } + + dir = filepath.Dir(s.path) + err = os.MkdirAll(dir, 0o755) + if err != nil { + return errors.Wrap(err, "failed to create Spec dir") + } + + tmp, err = os.CreateTemp(dir, "spec.*.tmp") + if err != nil { + return errors.Wrap(err, "failed to create Spec file") + } + _, err = tmp.Write(data) + tmp.Close() + if err != nil { + return errors.Wrap(err, "failed to write Spec file") + } + + err = renameIn(dir, filepath.Base(tmp.Name()), filepath.Base(s.path), overwrite) + + if err != nil { + os.Remove(tmp.Name()) + err = errors.Wrap(err, "failed to write Spec file") + } + + return err +} + +// GetVendor returns the vendor of this Spec. +func (s *Spec) GetVendor() string { + return s.vendor +} + +// GetClass returns the device class of this Spec. +func (s *Spec) GetClass() string { + return s.class +} + +// GetDevice returns the device for the given unqualified name. +func (s *Spec) GetDevice(name string) *Device { + return s.devices[name] +} + +// GetPath returns the filesystem path of this Spec. +func (s *Spec) GetPath() string { + return s.path +} + +// GetPriority returns the priority of this Spec. 
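+// A higher priority Spec takes precedence when multiple Spec files +// contain entries for the same fully qualified device.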
+func (s *Spec) GetPriority() int { + return s.priority +} + +// ApplyEdits applies the Spec's global-scope container edits to an OCI Spec. +func (s *Spec) ApplyEdits(ociSpec *oci.Spec) error { + return s.edits().Apply(ociSpec) +} + +// edits returns the applicable global container edits for this spec. +func (s *Spec) edits() *ContainerEdits { + return &ContainerEdits{&s.ContainerEdits} +} + +// Validate the Spec. +func (s *Spec) validate() (map[string]*Device, error) { + if err := validateVersion(s.Version); err != nil { + return nil, err + } + if err := ValidateVendorName(s.vendor); err != nil { + return nil, err + } + if err := ValidateClassName(s.class); err != nil { + return nil, err + } + if err := s.edits().Validate(); err != nil { + return nil, err + } + + devices := make(map[string]*Device) + for _, d := range s.Devices { + dev, err := newDevice(s, d) + if err != nil { + return nil, errors.Wrapf(err, "failed to add device %q", d.Name) + } + if _, conflict := devices[d.Name]; conflict { + return nil, errors.Errorf("invalid spec, multiple definitions of device %q", d.Name) + } + devices[d.Name] = dev + } + + return devices, nil +} + +// validateVersion checks whether the specified spec version is supported. +func validateVersion(version string) error { + if _, ok := validSpecVersions[version]; !ok { + return errors.Errorf("invalid version %q", version) + } + + return nil +} + +// ParseSpec parses CDI Spec data into a raw CDI Spec. +func ParseSpec(data []byte) (*cdi.Spec, error) { + var raw *cdi.Spec + err := yaml.UnmarshalStrict(data, &raw) + if err != nil { + return nil, errors.Wrap(err, "failed to unmarshal CDI Spec") + } + return raw, nil +} + +// SetSpecValidator sets a CDI Spec validator function. This function +// is used for extra CDI Spec content validation whenever a Spec file +// is loaded (using ReadSpec()) or written (using WriteSpec()). +func SetSpecValidator(fn func(*cdi.Spec) error) { + validatorLock.Lock() + defer validatorLock.Unlock() + specValidator = fn +} + +// validateSpec validates the Spec using the external validator. +func validateSpec(raw *cdi.Spec) error { + validatorLock.RLock() + defer validatorLock.RUnlock() + + if specValidator == nil { + return nil + } + err := specValidator(raw) + if err != nil { + return errors.Wrap(err, "Spec validation failed") + } + return nil +} + +// GenerateSpecName generates a vendor+class scoped Spec file name. The +// name can be passed to WriteSpec() to write a Spec file to the file +// system. +// +// vendor and class should match the vendor and class of the CDI Spec. +// The file name is generated without a ".json" or ".yaml" extension. +// The caller can append the desired extension to choose a particular +// encoding. Otherwise WriteSpec() will use its default encoding. +// +// This function always returns the same name for the same vendor/class +// combination. Therefore it cannot be used as such to generate multiple +// Spec file names for a single vendor and class. +func GenerateSpecName(vendor, class string) string { + return vendor + "-" + class +} + +// GenerateTransientSpecName generates a vendor+class scoped transient +// Spec file name. The name can be passed to WriteSpec() to write a Spec +// file to the file system. +// +// Transient Specs are those whose lifecycle is tied to that of some +// external entity, for instance a container. vendor and class should +// match the vendor and class of the CDI Spec.
transientID should be +// unique among all CDI users on the same host that might generate +// transient Spec files using the same vendor/class combination. If +// the external entity to which the lifecycle of the transient Spec +// is tied has a unique ID of its own, then this is usually a +// good choice for transientID. +// +// The file name is generated without a ".json" or ".yaml" extension. +// The caller can append the desired extension to choose a particular +// encoding. Otherwise WriteSpec() will use its default encoding. +func GenerateTransientSpecName(vendor, class, transientID string) string { + transientID = strings.ReplaceAll(transientID, "/", "_") + return GenerateSpecName(vendor, class) + "_" + transientID +} + +// GenerateNameForSpec generates a name for the given Spec using +// GenerateSpecName with the vendor and class taken from the Spec. +// On success it returns the generated name and a nil error. If +// the Spec does not contain a valid vendor or class, it returns +// an empty name and a non-nil error. +func GenerateNameForSpec(raw *cdi.Spec) (string, error) { + vendor, class := ParseQualifier(raw.Kind) + if vendor == "" { + return "", errors.Errorf("invalid vendor/class %q in Spec", raw.Kind) + } + + return GenerateSpecName(vendor, class), nil +} + +// GenerateNameForTransientSpec generates a name for the given transient +// Spec using GenerateTransientSpecName with the vendor and class taken +// from the Spec. On success it returns the generated name and a nil error. +// If the Spec does not contain a valid vendor or class, it returns +// an empty name and a non-nil error. +func GenerateNameForTransientSpec(raw *cdi.Spec, transientID string) (string, error) { + vendor, class := ParseQualifier(raw.Kind) + if vendor == "" { + return "", errors.Errorf("invalid vendor/class %q in Spec", raw.Kind) + } + + return GenerateTransientSpecName(vendor, class, transientID), nil +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_linux.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_linux.go new file mode 100644 index 00000000000..cca825c60df --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_linux.go @@ -0,0 +1,48 @@ +/* + Copyright © 2022 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + "os" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// Rename src to dst, both relative to the directory dir. If dst already exists +// refuse renaming with an error unless overwrite is explicitly asked for.
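+// On Linux the rename is done with renameat2(2) and RENAME_NOREPLACE, +// so the existence check and the rename are a single atomic operation.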
+func renameIn(dir, src, dst string, overwrite bool) error { + var flags uint + + dirf, err := os.Open(dir) + if err != nil { + return errors.Wrap(err, "rename failed") + } + defer dirf.Close() + + if !overwrite { + flags = unix.RENAME_NOREPLACE + } + + dirFd := int(dirf.Fd()) + err = unix.Renameat2(dirFd, src, dirFd, dst, flags) + if err != nil { + return errors.Wrap(err, "rename failed") + } + + return nil +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_other.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_other.go new file mode 100644 index 00000000000..285e04e27a3 --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_other.go @@ -0,0 +1,39 @@ +//go:build !linux +// +build !linux + +/* + Copyright © 2022 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + "os" + "path/filepath" +) + +// Rename src to dst, both relative to the directory dir. If dst already exists +// refuse renaming with an error unless overwrite is explicitly asked for. +func renameIn(dir, src, dst string, overwrite bool) error { + src = filepath.Join(dir, src) + dst = filepath.Join(dir, dst) + + _, err := os.Stat(dst) + if err == nil && !overwrite { + return os.ErrExist + } + + return os.Rename(src, dst) +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/config.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/config.go new file mode 100644 index 00000000000..3fa2e814bd8 --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/config.go @@ -0,0 +1,59 @@ +package specs + +import "os" + +// CurrentVersion is the current version of the Spec. +const CurrentVersion = "0.5.0" + +// Spec is the base configuration for CDI +type Spec struct { + Version string `json:"cdiVersion"` + Kind string `json:"kind"` + + Devices []Device `json:"devices"` + ContainerEdits ContainerEdits `json:"containerEdits,omitempty"` +} + +// Device is a "Device" a container runtime can add to a container +type Device struct { + Name string `json:"name"` + ContainerEdits ContainerEdits `json:"containerEdits"` +} + +// ContainerEdits are edits a container runtime must make to the OCI spec to expose the device. +type ContainerEdits struct { + Env []string `json:"env,omitempty"` + DeviceNodes []*DeviceNode `json:"deviceNodes,omitempty"` + Hooks []*Hook `json:"hooks,omitempty"` + Mounts []*Mount `json:"mounts,omitempty"` +} + +// DeviceNode represents a device node that needs to be added to the OCI spec. 
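+// Path is the only mandatory field; the remaining attributes can be +// filled in from the corresponding host device node (see fillMissingInfo() +// in the cdi package).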
+type DeviceNode struct { + Path string `json:"path"` + HostPath string `json:"hostPath,omitempty"` + Type string `json:"type,omitempty"` + Major int64 `json:"major,omitempty"` + Minor int64 `json:"minor,omitempty"` + FileMode *os.FileMode `json:"fileMode,omitempty"` + Permissions string `json:"permissions,omitempty"` + UID *uint32 `json:"uid,omitempty"` + GID *uint32 `json:"gid,omitempty"` +} + +// Mount represents a mount that needs to be added to the OCI spec. +type Mount struct { + HostPath string `json:"hostPath"` + ContainerPath string `json:"containerPath"` + Options []string `json:"options,omitempty"` + Type string `json:"type,omitempty"` +} + +// Hook represents a hook that needs to be added to the OCI spec. +type Hook struct { + HookName string `json:"hookName"` + Path string `json:"path"` + Args []string `json:"args,omitempty"` + Env []string `json:"env,omitempty"` + Timeout *int `json:"timeout,omitempty"` +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/oci.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/oci.go new file mode 100644 index 00000000000..14a0f6a0ba1 --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/oci.go @@ -0,0 +1,113 @@ +package specs + +import ( + "errors" + "fmt" + + spec "github.com/opencontainers/runtime-spec/specs-go" +) + +// ApplyOCIEditsForDevice applies a device's OCI edits; in other words, +// it finds the device in the CDI spec and applies the OCI patches that device +// requires to the OCI specification. +func ApplyOCIEditsForDevice(config *spec.Spec, cdi *Spec, dev string) error { + for _, d := range cdi.Devices { + if d.Name != dev { + continue + } + + return ApplyEditsToOCISpec(config, &d.ContainerEdits) + } + + return fmt.Errorf("CDI: device %q not found for spec %q", dev, cdi.Kind) +} + +// ApplyOCIEdits applies the OCI edits the CDI spec declares globally. +func ApplyOCIEdits(config *spec.Spec, cdi *Spec) error { + return ApplyEditsToOCISpec(config, &cdi.ContainerEdits) +} + +// ApplyEditsToOCISpec applies the specified edits to the OCI spec. +func ApplyEditsToOCISpec(config *spec.Spec, edits *ContainerEdits) error { + if config == nil { + return errors.New("spec is nil") + } + if edits == nil { + return nil + } + + if len(edits.Env) > 0 { + if config.Process == nil { + config.Process = &spec.Process{} + } + config.Process.Env = append(config.Process.Env, edits.Env...)
+ } + + for _, d := range edits.DeviceNodes { + if config.Linux == nil { + config.Linux = &spec.Linux{} + } + config.Linux.Devices = append(config.Linux.Devices, d.ToOCI()) + } + + for _, m := range edits.Mounts { + config.Mounts = append(config.Mounts, m.ToOCI()) + } + + for _, h := range edits.Hooks { + if config.Hooks == nil { + config.Hooks = &spec.Hooks{} + } + switch h.HookName { + case "prestart": + config.Hooks.Prestart = append(config.Hooks.Prestart, h.ToOCI()) + case "createRuntime": + config.Hooks.CreateRuntime = append(config.Hooks.CreateRuntime, h.ToOCI()) + case "createContainer": + config.Hooks.CreateContainer = append(config.Hooks.CreateContainer, h.ToOCI()) + case "startContainer": + config.Hooks.StartContainer = append(config.Hooks.StartContainer, h.ToOCI()) + case "poststart": + config.Hooks.Poststart = append(config.Hooks.Poststart, h.ToOCI()) + case "poststop": + config.Hooks.Poststop = append(config.Hooks.Poststop, h.ToOCI()) + default: + fmt.Printf("CDI: Unknown hook %q\n", h.HookName) + } + } + + return nil +} + +// ToOCI returns the opencontainers runtime Spec Hook for this Hook. +func (h *Hook) ToOCI() spec.Hook { + return spec.Hook{ + Path: h.Path, + Args: h.Args, + Env: h.Env, + Timeout: h.Timeout, + } +} + +// ToOCI returns the opencontainers runtime Spec Mount for this Mount. +func (m *Mount) ToOCI() spec.Mount { + return spec.Mount{ + Source: m.HostPath, + Destination: m.ContainerPath, + Options: m.Options, + Type: m.Type, + } +} + +// ToOCI returns the opencontainers runtime Spec LinuxDevice for this DeviceNode. +func (d *DeviceNode) ToOCI() spec.LinuxDevice { + return spec.LinuxDevice{ + Path: d.Path, + Type: d.Type, + Major: d.Major, + Minor: d.Minor, + FileMode: d.FileMode, + UID: d.UID, + GID: d.GID, + } +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 0da3efe4c21..b071cea51dd 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -49,6 +49,7 @@ type options struct { missedPrioritizedFiles *[]string compression Compression ctx context.Context + minChunkSize int } type Option func(o *options) error @@ -63,6 +64,7 @@ func WithChunkSize(chunkSize int) Option { // WithCompressionLevel option specifies the gzip compression level. // The default is gzip.BestCompression. +// This option will be ignored if the WithCompression option is used. // See also: https://godoc.org/compress/gzip#pkg-constants func WithCompressionLevel(level int) Option { return func(o *options) error { @@ -113,6 +115,18 @@ func WithContext(ctx context.Context) Option { } } +// WithMinChunkSize option specifies the minimal number of bytes of data +// that must be written in one gzip stream. +// By increasing this number, one gzip stream can contain multiple files +// and it hopefully leads to a smaller resulting blob. +// NOTE: This adds a TOC property that old readers don't understand. +func WithMinChunkSize(minChunkSize int) Option { + return func(o *options) error { + o.minChunkSize = minChunkSize + return nil + } +} + // Blob is an eStargz blob.
type Blob struct { io.ReadCloser @@ -180,7 +194,14 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { if err != nil { return nil, err } - tarParts := divideEntries(entries, runtime.GOMAXPROCS(0)) + var tarParts [][]*entry + if opts.minChunkSize > 0 { + // Each entry needs to know the size of the current gzip stream so they + // cannot be processed in parallel. + tarParts = [][]*entry{entries} + } else { + tarParts = divideEntries(entries, runtime.GOMAXPROCS(0)) + } writers := make([]*Writer, len(tarParts)) payloads := make([]*os.File, len(tarParts)) var mu sync.Mutex @@ -195,6 +216,13 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { } sw := NewWriterWithCompressor(esgzFile, opts.compression) sw.ChunkSize = opts.chunkSize + sw.MinChunkSize = opts.minChunkSize + if sw.needsOpenGzEntries == nil { + sw.needsOpenGzEntries = make(map[string]struct{}) + } + for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} { + sw.needsOpenGzEntries[f] = struct{}{} + } if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { return err } @@ -209,7 +237,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { rErr = err return nil, err } - tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...) + tocAndFooter, tocDgst, err := closeWithCombine(writers...) if err != nil { rErr = err return nil, err @@ -252,7 +280,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { // Writers doesn't write TOC and footer to the underlying writers so they can be // combined into a single eStargz and tocAndFooter returned by this function can // be appended at the tail of that combined blob. -func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { +func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { if len(ws) == 0 { return nil, "", fmt.Errorf("at least one writer must be passed") } @@ -395,7 +423,7 @@ func readerFromEntries(entries ...*entry) io.Reader { func importTar(in io.ReaderAt) (*tarFile, error) { tf := &tarFile{} - pw, err := newCountReader(in) + pw, err := newCountReadSeeker(in) if err != nil { return nil, fmt.Errorf("failed to make position watcher: %w", err) } @@ -571,19 +599,19 @@ func (tf *tempFiles) cleanupAll() error { return errorutil.Aggregate(allErr) } -func newCountReader(r io.ReaderAt) (*countReader, error) { +func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) { pos := int64(0) - return &countReader{r: r, cPos: &pos}, nil + return &countReadSeeker{r: r, cPos: &pos}, nil } -type countReader struct { +type countReadSeeker struct { r io.ReaderAt cPos *int64 mu sync.Mutex } -func (cr *countReader) Read(p []byte) (int, error) { +func (cr *countReadSeeker) Read(p []byte) (int, error) { cr.mu.Lock() defer cr.mu.Unlock() @@ -594,7 +622,7 @@ func (cr *countReader) Read(p []byte) (int, error) { return n, err } -func (cr *countReader) Seek(offset int64, whence int) (int64, error) { +func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { cr.mu.Lock() defer cr.mu.Unlock() @@ -615,7 +643,7 @@ func (cr *countReader) Seek(offset int64, whence int) (int64, error) { return offset, nil } -func (cr *countReader) currentPos() int64 { +func (cr *countReadSeeker) currentPos() int64 { cr.mu.Lock() defer cr.mu.Unlock() diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go 
b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index 921e59ec6ef..f4d55465584 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -150,10 +150,10 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { allErr = append(allErr, err) continue } - if tocSize <= 0 { + if tocOffset >= 0 && tocSize <= 0 { tocSize = sr.Size() - tocOffset - fSize } - if tocSize < int64(len(maybeTocBytes)) { + if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) { maybeTocBytes = maybeTocBytes[:tocSize] } r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) @@ -207,8 +207,16 @@ func (r *Reader) initFields() error { uname := map[int]string{} gname := map[int]string{} var lastRegEnt *TOCEntry - for _, ent := range r.toc.Entries { + var chunkTopIndex int + for i, ent := range r.toc.Entries { ent.Name = cleanEntryName(ent.Name) + switch ent.Type { + case "reg", "chunk": + if ent.Offset != r.toc.Entries[chunkTopIndex].Offset { + chunkTopIndex = i + } + ent.chunkTopIndex = chunkTopIndex + } if ent.Type == "reg" { lastRegEnt = ent } @@ -294,7 +302,7 @@ func (r *Reader) initFields() error { if e.isDataType() { e.nextOffset = lastOffset } - if e.Offset != 0 { + if e.Offset != 0 && e.InnerOffset == 0 { lastOffset = e.Offset } } @@ -488,6 +496,14 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { // // Name must be absolute path or one that is relative to root. func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) newFileReader(name string) (*fileReader, error) { name = cleanEntryName(name) ent, ok := r.Lookup(name) if !ok { @@ -505,11 +521,19 @@ func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { Err: errors.New("not a regular file"), } } - fr := &fileReader{ + return &fileReader{ r: r, size: ent.Size, ents: r.getChunks(ent), + }, nil +} + +func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err } + fr.preRead = preRead return io.NewSectionReader(fr, 0, fr.size), nil } @@ -521,9 +545,10 @@ func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { } type fileReader struct { - r *Reader - size int64 - ents []*TOCEntry // 1 or more reg/chunk entries + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries + preRead func(*TOCEntry, io.Reader) error } func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { @@ -578,10 +603,48 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) } defer dr.Close() - if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { - return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) + + if fr.preRead == nil { + if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil { + return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err) + } + return io.ReadFull(dr, p) + } + + var retN int + var retErr error + var found bool + var nr int64 + for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] { + if !e.isDataType() { + continue + } + if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset { + break + } + if in, err := 
io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr {
+			return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err)
+		}
+		nr = e.InnerOffset
+		if e == ent {
+			found = true
+			if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
+				return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err)
+			}
+			retN, retErr = io.ReadFull(dr, p)
+			nr += off + int64(retN)
+			continue
+		}
+		cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)}
+		if err := fr.preRead(e, cr); err != nil {
+			return 0, fmt.Errorf("failed to pre read: %w", err)
+		}
+		nr += cr.n
+	}
+	if !found {
+		return 0, fmt.Errorf("fileReader.ReadAt: target entry not found")
 	}
-	return io.ReadFull(dr, p)
+	return retN, retErr
 }
 
 // A Writer writes stargz files.
@@ -599,11 +662,20 @@ type Writer struct {
 	lastGroupname map[int]string
 	compressor    Compressor
 
+	uncompressedCounter *countWriteFlusher
+
 	// ChunkSize optionally controls the maximum number of bytes
 	// of data of a regular file that can be written in one gzip
 	// stream before a new gzip stream is started.
 	// Zero means to use a default, currently 4 MiB.
 	ChunkSize int
+
+	// MinChunkSize optionally controls the minimum number of bytes
+	// of data that must be written in one gzip stream before a new gzip
+	// stream is started.
+	// NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand.
+	MinChunkSize int
+
+	needsOpenGzEntries map[string]struct{}
 }
 
 // currentCompressionWriter writes to the current w.gz field, which can
@@ -646,6 +718,9 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse footer: %w", err)
 	}
+	if blobPayloadSize < 0 {
+		blobPayloadSize = sr.Size()
+	}
 	return c.Reader(io.LimitReader(sr, blobPayloadSize))
 }
 
@@ -672,11 +747,12 @@ func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
 	bw := bufio.NewWriter(w)
 	cw := &countWriter{w: bw}
 	return &Writer{
-		bw:         bw,
-		cw:         cw,
-		toc:        &JTOC{Version: 1},
-		diffHash:   sha256.New(),
-		compressor: c,
+		bw:                  bw,
+		cw:                  cw,
+		toc:                 &JTOC{Version: 1},
+		diffHash:            sha256.New(),
+		compressor:          c,
+		uncompressedCounter: &countWriteFlusher{},
 	}
 }
 
@@ -717,6 +793,20 @@ func (w *Writer) closeGz() error {
 	return nil
 }
 
+func (w *Writer) flushGz() error {
+	if w.closed {
+		return errors.New("flush on closed Writer")
+	}
+	if w.gz != nil {
+		if f, ok := w.gz.(interface {
+			Flush() error
+		}); ok {
+			return f.Flush()
+		}
+	}
+	return nil
+}
+
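The interplay of ChunkSize, the new MinChunkSize, and flushGz is easiest to see from the caller's side. Here is a minimal sketch of building an eStargz blob with both knobs set; the file name and size values are illustrative only.

package main

import (
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	f, err := os.Open("layer.tar") // an uncompressed tar layer (illustrative)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}

	blob, err := estargz.Build(
		io.NewSectionReader(f, 0, fi.Size()),
		estargz.WithChunkSize(4<<20),     // split large files into 4 MiB chunks
		estargz.WithMinChunkSize(64<<10), // pack small entries into shared gzip streams
	)
	if err != nil {
		panic(err)
	}
	defer blob.Close()

	out, err := os.Create("layer.esgz")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, blob); err != nil {
		panic(err)
	}
}

Note the trade-off spelled out in the build.go hunk earlier: with a non-zero minimum chunk size, entries are processed serially rather than in parallel, since each entry needs to know the size of the gzip stream it may join.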
 // nameIfChanged returns name, unless it was already the value of (*mp)[id],
 // in which case it returns the empty string.
 func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
@@ -736,6 +826,9 @@ func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
 func (w *Writer) condOpenGz() (err error) {
 	if w.gz == nil {
 		w.gz, err = w.compressor.Writer(w.cw)
+		if w.gz != nil {
+			w.gz = w.uncompressedCounter.register(w.gz)
+		}
 	}
 	return
 }
@@ -784,6 +877,8 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
 	if lossless {
 		tr.RawAccounting = true
 	}
+	prevOffset := w.cw.n
+	var prevOffsetUncompressed int64
 	for {
 		h, err := tr.Next()
 		if err == io.EOF {
@@ -883,10 +978,6 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
 			totalSize := ent.Size // save it before we destroy ent
 			tee := io.TeeReader(tr, payloadDigest.Hash())
 			for written < totalSize {
-				if err := w.closeGz(); err != nil {
-					return err
-				}
-
 				chunkSize := int64(w.chunkSize())
 				remain := totalSize - written
 				if remain < chunkSize {
@@ -894,7 +985,23 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
 				} else {
 					ent.ChunkSize = chunkSize
 				}
-				ent.Offset = w.cw.n
+
+				// We flush the underlying compression writer here to correctly calculate "w.cw.n".
+				if err := w.flushGz(); err != nil {
+					return err
+				}
+				if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
+					if err := w.closeGz(); err != nil {
+						return err
+					}
+					ent.Offset = w.cw.n
+					prevOffset = ent.Offset
+					prevOffsetUncompressed = w.uncompressedCounter.n
+				} else {
+					ent.Offset = prevOffset
+					ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
+				}
+
 				ent.ChunkOffset = written
 
 				chunkDigest := digest.Canonical.Digester()
@@ -940,6 +1047,17 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
 	return err
 }
 
+func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
+	if ent.Type != "reg" {
+		return false
+	}
+	if w.needsOpenGzEntries == nil {
+		return false
+	}
+	_, ok := w.needsOpenGzEntries[ent.Name]
+	return ok
+}
+
 // DiffID returns the SHA-256 of the uncompressed tar bytes.
 // It is only valid to call DiffID after Close.
 func (w *Writer) DiffID() string {
@@ -956,6 +1074,28 @@ func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
 }
 
 func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
+	if tocOff < 0 {
+		// This means that the TOC isn't contained in the blob.
+		// We pass a nil reader to ParseTOC and expect that ParseTOC acquires the TOC from
+		// the external location.
+ start := time.Now() + toc, tocDgst, err := d.ParseTOC(nil) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } if len(tocBytes) > 0 { start := time.Now() toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) @@ -1021,6 +1161,37 @@ func (cw *countWriter) Write(p []byte) (n int, err error) { return } +type countWriteFlusher struct { + io.WriteCloser + n int64 +} + +func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser { + wc.WriteCloser = w + return wc +} + +func (wc *countWriteFlusher) Write(p []byte) (n int, err error) { + n, err = wc.WriteCloser.Write(p) + wc.n += int64(n) + return +} + +func (wc *countWriteFlusher) Flush() error { + if f, ok := wc.WriteCloser.(interface { + Flush() error + }); ok { + return f.Flush() + } + return nil +} + +func (wc *countWriteFlusher) Close() error { + err := wc.WriteCloser.Close() + wc.WriteCloser = nil + return err +} + // isGzip reports whether br is positioned right before an upcoming gzip stream. // It does not consume any bytes from br. func isGzip(br *bufio.Reader) bool { @@ -1039,3 +1210,14 @@ func positive(n int64) int64 { } return n } + +type countReader struct { + r io.Reader + n int64 +} + +func (cr *countReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + cr.n += int64(n) + return +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index 591d7a62e11..f24afe32f45 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -60,7 +60,7 @@ type GzipCompressor struct { compressionLevel int } -func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { +func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) { return gzip.NewWriterLevel(w, gc.compressionLevel) } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 37448cae085..0ca6fd75f2e 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -31,6 +31,7 @@ import ( "errors" "fmt" "io" + "math/rand" "os" "path/filepath" "reflect" @@ -44,21 +45,27 @@ import ( digest "github.com/opencontainers/go-digest" ) +func init() { + rand.Seed(time.Now().UnixNano()) +} + // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression - CountStreams(*testing.T, []byte) int + TestStreams(t *testing.T, b []byte, streams []int64) DiffIDOf(*testing.T, []byte) string String() string } // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. -func CompressionTestSuite(t *testing.T, controllers ...TestingController) { +func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) { t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) 
}) t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) }) } +type TestingControllerFactory func() TestingController + const ( uncompressedType int = iota gzipType @@ -75,11 +82,12 @@ var allowedPrefix = [4]string{"", "./", "/", "../"} // testBuild tests the resulting stargz blob built by this pkg has the same // contents as the normal stargz blob. -func testBuild(t *testing.T, controllers ...TestingController) { +func testBuild(t *testing.T, controllers ...TestingControllerFactory) { tests := []struct { - name string - chunkSize int - in []tarEntry + name string + chunkSize int + minChunkSize []int + in []tarEntry }{ { name: "regfiles and directories", @@ -108,11 +116,14 @@ func testBuild(t *testing.T, controllers ...TestingController) { ), }, { - name: "various files", - chunkSize: 4, + name: "various files", + chunkSize: 4, + minChunkSize: []int{0, 64000}, in: tarOf( file("baz.txt", "bazbazbazbazbazbazbaz"), - file("foo.txt", "a"), + file("foo1.txt", "a"), + file("bar/foo2.txt", "b"), + file("foo3.txt", "c"), symlink("barlink", "test/bar.txt"), dir("test/"), dir("dev/"), @@ -144,99 +155,112 @@ func testBuild(t *testing.T, controllers ...TestingController) { }, } for _, tt := range tests { + if len(tt.minChunkSize) == 0 { + tt.minChunkSize = []int{0} + } for _, srcCompression := range srcCompressions { srcCompression := srcCompression - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, prefix := range allowedPrefix { prefix := prefix - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) { - tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) - // Test divideEntries() - entries, err := sortEntries(tarBlob, nil, nil) // identical order - if err != nil { - t.Fatalf("failed to parse tar: %v", err) - } - var merged []*entry - for _, part := range divideEntries(entries, 4) { - merged = append(merged, part...) - } - if !reflect.DeepEqual(entries, merged) { - for _, e := range entries { - t.Logf("Original: %v", e.header) + for _, minChunkSize := range tt.minChunkSize { + minChunkSize := minChunkSize + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) { + tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) } - for _, e := range merged { - t.Logf("Merged: %v", e.header) + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) 
+ } + if !reflect.DeepEqual(entries, merged) { + for _, e := range entries { + t.Logf("Original: %v", e.header) + } + for _, e := range merged { + t.Logf("Merged: %v", e.header) + } + t.Errorf("divided entries couldn't be merged") + return } - t.Errorf("divided entries couldn't be merged") - return - } - // Prepare sample data - wantBuf := new(bytes.Buffer) - sw := NewWriterWithCompressor(wantBuf, cl) - sw.ChunkSize = tt.chunkSize - if err := sw.AppendTar(tarBlob); err != nil { - t.Fatalf("failed to append tar to want stargz: %v", err) - } - if _, err := sw.Close(); err != nil { - t.Fatalf("failed to prepare want stargz: %v", err) - } - wantData := wantBuf.Bytes() - want, err := Open(io.NewSectionReader( - bytes.NewReader(wantData), 0, int64(len(wantData))), - WithDecompressors(cl), - ) - if err != nil { - t.Fatalf("failed to parse the want stargz: %v", err) - } + // Prepare sample data + cl1 := newCL() + wantBuf := new(bytes.Buffer) + sw := NewWriterWithCompressor(wantBuf, cl1) + sw.MinChunkSize = minChunkSize + sw.ChunkSize = tt.chunkSize + if err := sw.AppendTar(tarBlob); err != nil { + t.Fatalf("failed to append tar to want stargz: %v", err) + } + if _, err := sw.Close(); err != nil { + t.Fatalf("failed to prepare want stargz: %v", err) + } + wantData := wantBuf.Bytes() + want, err := Open(io.NewSectionReader( + bytes.NewReader(wantData), 0, int64(len(wantData))), + WithDecompressors(cl1), + ) + if err != nil { + t.Fatalf("failed to parse the want stargz: %v", err) + } - // Prepare testing data - rc, err := Build(compressBlob(t, tarBlob, srcCompression), - WithChunkSize(tt.chunkSize), WithCompression(cl)) - if err != nil { - t.Fatalf("failed to build stargz: %v", err) - } - defer rc.Close() - gotBuf := new(bytes.Buffer) - if _, err := io.Copy(gotBuf, rc); err != nil { - t.Fatalf("failed to copy built stargz blob: %v", err) - } - gotData := gotBuf.Bytes() - got, err := Open(io.NewSectionReader( - bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), - WithDecompressors(cl), - ) - if err != nil { - t.Fatalf("failed to parse the got stargz: %v", err) - } + // Prepare testing data + var opts []Option + if minChunkSize > 0 { + opts = append(opts, WithMinChunkSize(minChunkSize)) + } + cl2 := newCL() + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...) 
+ if err != nil { + t.Fatalf("failed to build stargz: %v", err) + } + defer rc.Close() + gotBuf := new(bytes.Buffer) + if _, err := io.Copy(gotBuf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + gotData := gotBuf.Bytes() + got, err := Open(io.NewSectionReader( + bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), + WithDecompressors(cl2), + ) + if err != nil { + t.Fatalf("failed to parse the got stargz: %v", err) + } - // Check DiffID is properly calculated - rc.Close() - diffID := rc.DiffID() - wantDiffID := cl.DiffIDOf(t, gotData) - if diffID.String() != wantDiffID { - t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) - } + // Check DiffID is properly calculated + rc.Close() + diffID := rc.DiffID() + wantDiffID := cl2.DiffIDOf(t, gotData) + if diffID.String() != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } - // Compare as stargz - if !isSameVersion(t, cl, wantData, gotData) { - t.Errorf("built stargz hasn't same json") - return - } - if !isSameEntries(t, want, got) { - t.Errorf("built stargz isn't same as the original") - return - } + // Compare as stargz + if !isSameVersion(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz hasn't same json") + return + } + if !isSameEntries(t, want, got) { + t.Errorf("built stargz isn't same as the original") + return + } - // Compare as tar.gz - if !isSameTarGz(t, cl, wantData, gotData) { - t.Errorf("built stargz isn't same tar.gz") - return - } - }) + // Compare as tar.gz + if !isSameTarGz(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz isn't same tar.gz") + return + } + }) + } } } } @@ -244,13 +268,13 @@ func testBuild(t *testing.T, controllers ...TestingController) { } } -func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { - aGz, err := controller.Reader(bytes.NewReader(a)) +func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aGz, err := cla.Reader(bytes.NewReader(a)) if err != nil { t.Fatalf("failed to read A") } defer aGz.Close() - bGz, err := controller.Reader(bytes.NewReader(b)) + bGz, err := clb.Reader(bytes.NewReader(b)) if err != nil { t.Fatalf("failed to read B") } @@ -304,12 +328,12 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return true } -func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool { - aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller) +func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla) if err != nil { t.Fatalf("failed to parse A: %v", err) } - bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller) + bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb) if err != nil { t.Fatalf("failed to parse B: %v", err) } @@ -463,7 +487,7 @@ func equalEntry(a, b *TOCEntry) bool { a.GID == b.GID && a.Uname == b.Uname && a.Gname == b.Gname && - (a.Offset > 0) == (b.Offset > 0) && + (a.Offset >= 0) == (b.Offset >= 0) && (a.NextOffset() > 0) == (b.NextOffset() > 0) && a.DevMajor == b.DevMajor && a.DevMinor == b.DevMinor && @@ -510,14 +534,15 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { const chunkSize = 3 // type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, 
compressionLevel int) -type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) // testDigestAndVerify runs specified checks against sample stargz blobs. -func testDigestAndVerify(t *testing.T, controllers ...TestingController) { +func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { tests := []struct { - name string - tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) - checks []check + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + minChunkSize []int }{ { name: "no-regfile", @@ -544,6 +569,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { regDigest(t, "test/bar.txt", "bbb", dgstMap), ) }, + minChunkSize: []int{0, 64000}, checks: []check{ checkStargzTOC, checkVerifyTOC, @@ -581,11 +607,14 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { }, }, { - name: "with-non-regfiles", + name: "with-non-regfiles", + minChunkSize: []int{0, 64000}, tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), + regDigest(t, "bar/foo2.txt", "b", dgstMap), + regDigest(t, "foo3.txt", "c", dgstMap), symlink("barlink", "test/bar.txt"), dir("test/"), regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), @@ -599,6 +628,8 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { checkVerifyInvalidStargzFail(buildTar(t, tarOf( file("baz.txt", "bazbazbazbazbazbazbaz"), file("foo.txt", "a"), + file("bar/foo2.txt", "b"), + file("foo3.txt", "c"), symlink("barlink", "test/bar.txt"), dir("test/"), file("test/bar.txt", "testbartestbar"), @@ -612,38 +643,45 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { } for _, tt := range tests { + if len(tt.minChunkSize) == 0 { + tt.minChunkSize = []int{0} + } for _, srcCompression := range srcCompressions { srcCompression := srcCompression - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, prefix := range allowedPrefix { prefix := prefix for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) { - // Get original tar file and chunk digests - dgstMap := make(map[string]digest.Digest) - tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) - - rc, err := Build(compressBlob(t, tarBlob, srcCompression), - WithChunkSize(chunkSize), WithCompression(cl)) - if err != nil { - t.Fatalf("failed to convert stargz: %v", err) - } - tocDigest := rc.TOCDigest() - defer rc.Close() - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, rc); err != nil { - t.Fatalf("failed to copy built stargz blob: %v", err) - } - newStargz := buf.Bytes() - // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. 
- dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) + for _, minChunkSize := range tt.minChunkSize { + minChunkSize := minChunkSize + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) { + // Get original tar file and chunk digests + dgstMap := make(map[string]digest.Digest) + tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) + + cl := newCL() + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + tocDigest := rc.TOCDigest() + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + newStargz := buf.Bytes() + // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. + dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) - for _, check := range tt.checks { - check(t, newStargz, tocDigest, dgstMap, cl) - } - }) + for _, check := range tt.checks { + check(t, newStargz, tocDigest, dgstMap, cl, newCL) + } + }) + } } } } @@ -654,7 +692,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { // checkStargzTOC checks the TOC JSON of the passed stargz has the expected // digest and contains valid chunks. It walks all entries in the stargz and // checks all chunk digests stored to the TOC JSON match the actual contents. -func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { +func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -765,7 +803,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyTOC checks the verification works for the TOC JSON of the passed // stargz. It walks all entries in the stargz and checks the verifications for // all chunks work. -func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { +func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -846,7 +884,7 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be // detected during the verification and the verification returns an error. 
func checkVerifyInvalidTOCEntryFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { funcs := map[string]rewriteFunc{ "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { var found bool @@ -920,8 +958,9 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { // checkVerifyInvalidStargzFail checks if the verification detects that the // given stargz file doesn't match to the expected digest and returns error. func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { - rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller)) + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + cl := newController() + rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl)) if err != nil { t.Fatalf("failed to convert stargz: %v", err) } @@ -934,7 +973,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { sgz, err := Open( io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))), - WithDecompressors(controller), + WithDecompressors(cl), ) if err != nil { t.Fatalf("failed to parse converted stargz: %v", err) @@ -951,7 +990,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { // checkVerifyBrokenContentFail checks if the verifier detects broken contents // that doesn't match to the expected digest and returns error. 
func checkVerifyBrokenContentFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { // Parse stargz file sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), @@ -1070,7 +1109,10 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT } // Decode the TOC JSON - tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + var tocReader io.Reader + if tocOffset >= 0 { + tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + } decodedJTOC, _, err = controller.ParseTOC(tocReader) if err != nil { return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) @@ -1078,28 +1120,31 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT return decodedJTOC, tocOffset, nil } -func testWriteAndOpen(t *testing.T, controllers ...TestingController) { +func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { const content = "Some contents" invalidUtf8 := "\xff\xfe\xfd" xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} sampleOwner := owner{uid: 50, gid: 100} + data64KB := randomContents(64000) + tests := []struct { - name string - chunkSize int - in []tarEntry - want []stargzCheck - wantNumGz int // expected number of streams + name string + chunkSize int + minChunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz wantFailOnLossLess bool + wantTOCVersion int // default = 1 }{ { - name: "empty", - in: tarOf(), - wantNumGz: 2, // empty tar + TOC + footer - wantNumGzLossLess: 3, // empty tar + TOC + footer + name: "empty", + in: tarOf(), + wantNumGz: 2, // (empty tar) + TOC + footer want: checks( numTOCEntries(0), ), @@ -1195,7 +1240,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { dir("foo/"), file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), ), - wantNumGz: 9, + wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer want: checks( numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file hasDir("foo/"), @@ -1326,23 +1371,108 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { mustSameEntry("foo/foo1", "foolink"), ), }, + { + name: "several_files_in_chunk", + minChunkSize: 8000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + file("foo22", "ccc"), + dir("bar/"), + file("bar/bar.txt", "aaa"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed "data64KB" is still larger than 8KB + wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3 + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo22", len("ccc")), + hasFileLen("bar/bar.txt", len("aaa")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo22", digestFor("ccc")), + hasFileDigest("bar/bar.txt", digestFor("aaa")), + hasFileDigest("foo3", digestFor(data64KB)), + 
hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo22", 0, "ccc"), + hasFileContentsRange("foo22", 1, "cc"), + hasFileContentsRange("foo22", 2, "c"), + hasFileContentsRange("bar/bar.txt", 0, "aaa"), + hasFileContentsRange("bar/bar.txt", 1, "aa"), + hasFileContentsRange("bar/bar.txt", 2, "a"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, + { + name: "several_files_in_chunk_chunked", + minChunkSize: 8000, + chunkSize: 32000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + dir("bar/"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB + wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks) + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo/foo1", 1, data64KB[1:]), + hasFileContentsRange("foo/foo1", 2, data64KB[2:]), + hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, } for _, tt := range tests { - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, prefix := range allowedPrefix { prefix := prefix for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, lossless := range []bool{true, false} { - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) origTarDgstr := digest.Canonical.Digester() tr = io.TeeReader(tr, origTarDgstr.Hash()) var stargzBuf bytes.Buffer - w := NewWriterWithCompressor(&stargzBuf, cl) + cl1 := newCL() + w := NewWriterWithCompressor(&stargzBuf, cl1) w.ChunkSize = tt.chunkSize + w.MinChunkSize = tt.minChunkSize if lossless { err := w.AppendTarLossLess(tr) if tt.wantFailOnLossLess { @@ -1366,7 +1496,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { if lossless { // 
Check if the result blob reserves original tar metadata - rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl) + rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1) if err != nil { t.Errorf("failed to decompress blob: %v", err) return @@ -1385,32 +1515,71 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { } diffID := w.DiffID() - wantDiffID := cl.DiffIDOf(t, b) + wantDiffID := cl1.DiffIDOf(t, b) if diffID != wantDiffID { t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) } - got := cl.CountStreams(t, b) - wantNumGz := tt.wantNumGz - if lossless && tt.wantNumGzLossLess > 0 { - wantNumGz = tt.wantNumGzLossLess - } - if got != wantNumGz { - t.Errorf("number of streams = %d; want %d", got, wantNumGz) - } - telemetry, checkCalled := newCalledTelemetry() + sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))) r, err := Open( - io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), - WithDecompressors(cl), + sr, + WithDecompressors(cl1), WithTelemetry(telemetry), ) if err != nil { t.Fatalf("stargz.Open: %v", err) } - if err := checkCalled(); err != nil { + wantTOCVersion := 1 + if tt.wantTOCVersion > 0 { + wantTOCVersion = tt.wantTOCVersion + } + if r.toc.Version != wantTOCVersion { + t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion) + } + + footerSize := cl1.FooterSize() + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + t.Errorf("failed to read footer: %v", err) + } + _, tocOffset, _, err := cl1.ParseFooter(footer) + if err != nil { + t.Errorf("failed to parse footer: %v", err) + } + if err := checkCalled(tocOffset >= 0); err != nil { t.Errorf("telemetry failure: %v", err) } + + wantNumGz := tt.wantNumGz + if lossless && tt.wantNumGzLossLess > 0 { + wantNumGz = tt.wantNumGzLossLess + } + streamOffsets := []int64{0} + prevOffset := int64(-1) + streams := 0 + for _, e := range r.toc.Entries { + if e.Offset > prevOffset { + streamOffsets = append(streamOffsets, e.Offset) + prevOffset = e.Offset + streams++ + } + } + streams++ // TOC + if tocOffset >= 0 { + // toc is in the blob + streamOffsets = append(streamOffsets, tocOffset) + } + streams++ // footer + streamOffsets = append(streamOffsets, footerOffset) + if streams != wantNumGz { + t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz) + } + + t.Logf("testing streams: %+v", streamOffsets) + cl1.TestStreams(t, b, streamOffsets) + for _, want := range tt.want { want.check(t, r) } @@ -1422,7 +1591,12 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { } } -func newCalledTelemetry() (telemetry *Telemetry, check func() error) { +type chunkInfo struct { + name string + data string +} + +func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) { var getFooterLatencyCalled bool var getTocLatencyCalled bool var deserializeTocLatencyCalled bool @@ -1430,13 +1604,15 @@ func newCalledTelemetry() (telemetry *Telemetry, check func() error) { func(time.Time) { getFooterLatencyCalled = true }, func(time.Time) { getTocLatencyCalled = true }, func(time.Time) { deserializeTocLatencyCalled = true }, - }, func() error { + }, func(needsGetTOC bool) error { var allErr []error if !getFooterLatencyCalled { allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) } - if !getTocLatencyCalled { - allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + if 
needsGetTOC { + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } } if !deserializeTocLatencyCalled { allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) @@ -1573,6 +1749,53 @@ func hasFileDigest(file string, digest string) stargzCheck { }) } +func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + extraMap := make(map[string]chunkInfo) + for _, e := range extra { + extraMap[e.name] = e + } + var extraNames []string + for n := range extraMap { + extraNames = append(extraNames, n) + } + f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error { + t.Logf("On %q: got preread of %q", file, e.Name) + ex, ok := extraMap[e.Name] + if !ok { + t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames) + } + got, err := io.ReadAll(cr) + if err != nil { + t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err) + } + if ex.data != string(got) { + t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data)) + } + delete(extraMap, e.Name) + return nil + }) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + if len(extraMap) != 0 { + var exNames []string + for _, ex := range extraMap { + exNames = append(exNames, ex.name) + } + t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames) + } + }) +} + func hasFileContentsRange(file string, offset int, want string) stargzCheck { return stargzCheckFn(func(t *testing.T, r *Reader) { f, err := r.OpenFile(file) @@ -1585,7 +1808,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck { t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) } if string(got) != want { - t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want) + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) } }) } @@ -1797,6 +2020,13 @@ func mustSameEntry(files ...string) stargzCheck { }) } +func viewContent(c []byte) string { + if len(c) < 100 { + return string(c) + } + return string(c[:50]) + "...(omit)..." 
+ string(c[50:100])
+}
+
 func tarOf(s ...tarEntry) []tarEntry { return s }
 
 type tarEntry interface {
@@ -2056,6 +2286,16 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin
 	})
 }
 
+var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+func randomContents(n int) string {
+	b := make([]rune, n)
+	for i := range b {
+		b[i] = runes[rand.Intn(len(runes))]
+	}
+	return string(b)
+}
+
 func fileModeToTarMode(mode os.FileMode) (int64, error) {
 	h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "")
 	if err != nil {
@@ -2073,3 +2313,54 @@ func (f fileInfoOnlyMode) Mode() os.FileMode  { return os.FileMode(f) }
 func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
 func (f fileInfoOnlyMode) IsDir() bool        { return os.FileMode(f).IsDir() }
 func (f fileInfoOnlyMode) Sys() interface{}   { return nil }
+
+func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
+	if len(streams) == 0 {
+		return // nop
+	}
+
+	wants := map[int64]struct{}{}
+	for _, s := range streams {
+		wants[s] = struct{}{}
+	}
+
+	len0 := len(b)
+	br := bytes.NewReader(b)
+	zr := new(gzip.Reader)
+	t.Logf("got gzip streams:")
+	numStreams := 0
+	for {
+		zoff := len0 - br.Len()
+		if err := zr.Reset(br); err != nil {
+			if err == io.EOF {
+				return
+			}
+			t.Fatalf("countStreams(gzip), Reset: %v", err)
+		}
+		zr.Multistream(false)
+		n, err := io.Copy(io.Discard, zr)
+		if err != nil {
+			t.Fatalf("countStreams(gzip), Copy: %v", err)
+		}
+		var extra string
+		if len(zr.Header.Extra) > 0 {
+			extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
+		}
+		t.Logf("  [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
+		delete(wants, int64(zoff))
+		numStreams++
+	}
+}
+
+func GzipDiffIDOf(t *testing.T, b []byte) string {
+	h := sha256.New()
+	zr, err := gzip.NewReader(bytes.NewReader(b))
+	if err != nil {
+		t.Fatalf("diffIDOf(gzip): %v", err)
+	}
+	defer zr.Close()
+	if _, err := io.Copy(h, zr); err != nil {
+		t.Fatalf("diffIDOf(gzip).Copy: %v", err)
+	}
+	return fmt.Sprintf("sha256:%x", h.Sum(nil))
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
index 3bc74463ecf..57e0aa614e4 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
@@ -149,6 +149,12 @@ type TOCEntry struct {
 	// ChunkSize.
 	Offset int64 `json:"offset,omitempty"`
 
+	// InnerOffset is an optional field that indicates the uncompressed offset
+	// of this "reg" or "chunk" payload in a stream that starts at Offset.
+	// This field makes it possible to put multiple "reg" or "chunk" payloads
+	// in one chunk, having the same Offset but different InnerOffset values.
+	InnerOffset int64 `json:"innerOffset,omitempty"`
+
 	nextOffset int64 // the Offset of the next entry with a non-zero Offset
 
 	// DevMajor is the major device number for "char" and "block" types.
@@ -186,6 +192,9 @@ type TOCEntry struct {
 	ChunkDigest string `json:"chunkDigest,omitempty"`
 
 	children map[string]*TOCEntry
+
+	// chunkTopIndex is the index of the entry where Offset starts in the blob.
+	chunkTopIndex int
 }
 
 // ModTime returns the entry's modification time.
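Concretely, once MinChunkSize packs several small entries into one gzip stream, their TOC entries share the same Offset and are told apart by InnerOffset. A sketch with made-up values:

package main

import (
	"fmt"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	// Two small files sharing the gzip stream that starts at blob offset 512:
	// "foo22" sits right after the 2 uncompressed bytes of "foo2".
	entries := []estargz.TOCEntry{
		{Name: "foo2", Type: "reg", Size: 2, Offset: 512, InnerOffset: 0},
		{Name: "foo22", Type: "reg", Size: 3, Offset: 512, InnerOffset: 2},
	}
	for _, e := range entries {
		fmt.Printf("%s: stream@%d, inner@%d\n", e.Name, e.Offset, e.InnerOffset)
	}
}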
@@ -279,7 +288,10 @@ type Compressor interface {
 	// Writer returns WriteCloser to be used for writing a chunk to eStargz.
 	// Every time a chunk is written, the WriteCloser is closed and Writer is
 	// called again for writing the next chunk.
-	Writer(w io.Writer) (io.WriteCloser, error)
+	//
+	// The returned writer should implement a "Flush() error" function that flushes
+	// any pending compressed data to the underlying writer.
+	Writer(w io.Writer) (WriteFlushCloser, error)
 
 	// WriteTOCAndFooter is called to write JTOC to the passed Writer.
 	// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob
@@ -303,8 +315,12 @@ type Decompressor interface {
 	// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
 	// the top until the TOC JSON).
 	//
-	// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
-	// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
+	// If tocOffset < 0, we assume that the TOC isn't contained in the blob and pass a nil reader
+	// to ParseTOC. We expect that ParseTOC acquires the TOC from an external location and returns it.
+	//
+	// tocSize is optional. If tocSize <= 0, it's by default the size of the range from tocOffset until the beginning of the
+	// footer (blob size - tocOff - FooterSize).
+	// If blobPayloadSize < 0, blobPayloadSize becomes the blob size.
 	ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
 
 	// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
@@ -313,5 +329,14 @@ type Decompressor interface {
 	// This function returns tocDgst that represents the digest of TOC that will be used
 	// to verify this blob. This must match to the value returned from
 	// Compressor.WriteTOCAndFooter that is used when creating this blob.
+	//
+	// If tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't contained in the blob.
+	// Pass a nil reader to ParseTOC; we then expect that ParseTOC acquires the TOC from an external
+	// location and returns it.
 	ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
 }
+
+type WriteFlushCloser interface {
+	io.WriteCloser
+	Flush() error
+}
diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go
index 352280433e3..42c8fd72e63 100644
--- a/vendor/github.com/containers/buildah/define/build.go
+++ b/vendor/github.com/containers/buildah/define/build.go
@@ -67,6 +67,8 @@ type CommonBuildOptions struct {
 	// NoHosts tells the builder not to create /etc/hosts content when running
 	// containers.
 	NoHosts bool
+	// NoNewPrivileges removes the ability for the container to gain privileges
+	NoNewPrivileges bool
 	// OmitTimestamp forces epoch 0 as created timestamp to allow for
 	// deterministic, content-addressable builds.
 	OmitTimestamp bool
@@ -139,10 +141,10 @@ type BuildOptions struct {
 	TransientMounts []string
 	// CacheFrom specifies any remote repository which can be treated as
 	// potential cache source.
-	CacheFrom reference.Named
+	CacheFrom []reference.Named
 	// CacheTo specifies any remote repository which can be treated as
 	// potential cache destination.
-	CacheTo reference.Named
+	CacheTo []reference.Named
 	// CacheTTL specifies duration, if specified using `--cache-ttl` then
 	// cache intermediate images under this duration will be considered as
 	// valid cache sources and images outside this duration will be ignored.
@@ -186,6 +188,10 @@ type BuildOptions struct {
 	// specified, indicating that the shared, system-wide default policy
 	// should be used.
 	SignaturePolicyPath string
+	// SkipUnusedStages allows users to skip stages in multi-stage builds
+	// which do not contribute anything to the target stage. Expected default
+	// value is true.
+	SkipUnusedStages types.OptionalBool
 	// ReportWriter is an io.Writer which will be used to report the
 	// progress of the (possible) pulling of the source image and the
 	// writing of the new image.
@@ -292,6 +298,10 @@ type BuildOptions struct {
 	// From is the image name to use to replace the value specified in the first
 	// FROM instruction in the Containerfile
 	From string
+	// GroupAdd is a list of groups to add to the primary process within
+	// the container. 'keep-groups' allows container processes to use
+	// supplementary groups.
+	GroupAdd []string
 	// Platforms is the list of parsed OS/Arch/Variant triples that we want
 	// to build the image for. If this slice has items in it, the OS and
 	// Architecture fields above are ignored.
diff --git a/vendor/github.com/containers/buildah/define/mount_unsupported.go b/vendor/github.com/containers/buildah/define/mount_unsupported.go
new file mode 100644
index 00000000000..fe09bfccce5
--- /dev/null
+++ b/vendor/github.com/containers/buildah/define/mount_unsupported.go
@@ -0,0 +1,17 @@
+//go:build darwin || windows
+// +build darwin windows
+
+package define
+
+const (
+	// TypeBind is the type for mounting host dir
+	TypeBind = "bind"
+
+	// TempDir is the default for storing temporary files
+	TempDir = "/var/tmp"
+)
+
+var (
+	// Mount options for bind
+	BindOptions = []string{""}
+)
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go
index cce41fc3f59..13256742662 100644
--- a/vendor/github.com/containers/buildah/define/types.go
+++ b/vendor/github.com/containers/buildah/define/types.go
@@ -5,7 +5,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	urlpkg "net/url"
 	"os"
@@ -30,7 +30,7 @@ const (
 	Package = "buildah"
 	// Version for the Package. Bump version in contrib/rpm/buildah.spec
 	// too.
-	Version = "1.27.1"
+	Version = "1.29.0"
 
 	// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc" @@ -121,13 +121,13 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err url != "-" { return "", "", nil } - name, err = ioutil.TempDir(dir, prefix) + name, err = os.MkdirTemp(dir, prefix) if err != nil { - return "", "", fmt.Errorf("error creating temporary directory for %q: %w", url, err) + return "", "", fmt.Errorf("creating temporary directory for %q: %w", url, err) } urlParsed, err := urlpkg.Parse(url) if err != nil { - return "", "", fmt.Errorf("error parsing url %q: %w", url, err) + return "", "", fmt.Errorf("parsing url %q: %w", url, err) } if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") { combinedOutput, gitSubDir, err := cloneToDirectory(url, name) @@ -255,7 +255,7 @@ func downloadToDirectory(url, dir string) error { return err } defer resp1.Body.Close() - body, err := ioutil.ReadAll(resp1.Body) + body, err := io.ReadAll(resp1.Body) if err != nil { return err } @@ -271,7 +271,7 @@ func downloadToDirectory(url, dir string) error { func stdinToDirectory(dir string) error { logrus.Debugf("extracting stdin to %q", dir) r := bufio.NewReader(os.Stdin) - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return fmt.Errorf("failed to read from stdin: %w", err) } diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index 28b179026ec..29c76113380 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -16,6 +16,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/signature/signer" storageTransport "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" @@ -99,6 +100,9 @@ type CopyOptions struct { PolicyAllowStorage bool // SignaturePolicyPath to overwrite the default one. SignaturePolicyPath string + // If non-empty, asks for signatures to be added during the copy + // using the provided signers. + Signers []*signer.Signer // If non-empty, asks for a signature to be added during the copy, and // specifies a key ID. 
SignBy string @@ -299,6 +303,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { c.imageCopyOptions.OciEncryptLayers = options.OciEncryptLayers c.imageCopyOptions.OciDecryptConfig = options.OciDecryptConfig c.imageCopyOptions.RemoveSignatures = options.RemoveSignatures + c.imageCopyOptions.Signers = options.Signers c.imageCopyOptions.SignBy = options.SignBy c.imageCopyOptions.SignPassphrase = options.SignPassphrase c.imageCopyOptions.SignBySigstorePrivateKeyFile = options.SignBySigstorePrivateKeyFile @@ -413,7 +418,7 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err AllowedRegistries []string `json:"allowedRegistries,omitempty"` } if err := json.Unmarshal([]byte(registrySources), &sources); err != nil { - return nil, fmt.Errorf("error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err) + return nil, fmt.Errorf("parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err) } blocked := false if len(sources.BlockedRegistries) > 0 { diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go index 2cde098468f..431642f5de5 100644 --- a/vendor/github.com/containers/common/libimage/disk_usage.go +++ b/vendor/github.com/containers/common/libimage/disk_usage.go @@ -28,26 +28,51 @@ type ImageDiskUsage struct { // DiskUsage calculates the disk usage for each image in the local containers // storage. Note that a single image may yield multiple usage reports, one for // each repository tag. -func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, error) { +func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, int64, error) { layerTree, err := r.layerTree() if err != nil { - return nil, err + return nil, -1, err } images, err := r.ListImages(ctx, nil, nil) if err != nil { - return nil, err + return nil, -1, err } + var totalSize int64 + visitedImages := make(map[string]bool) + visitedLayers := make(map[string]bool) + var allUsages []ImageDiskUsage for _, image := range images { usages, err := diskUsageForImage(ctx, image, layerTree) if err != nil { - return nil, err + return nil, -1, err } allUsages = append(allUsages, usages...) + + if _, ok := visitedImages[image.ID()]; ok { + // Do not count an image twice + continue + } + visitedImages[image.ID()] = true + + size, err := image.Size() + if err != nil { + return nil, -1, err + } + for _, layer := range layerTree.layersOf(image) { + if _, ok := visitedLayers[layer.ID]; ok { + // Do not count a layer twice, so remove its + // size from the image size. + size -= layer.UncompressedSize + continue + } + visitedLayers[layer.ID] = true + } + totalSize += size } - return allUsages, err + return allUsages, totalSize, err } // diskUsageForImage returns the disk-usage statistics for the specified image. diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index f387edf584e..8f85640e6c4 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -73,7 +73,8 @@ func (r *Runtime) filterImages(ctx context.Context, images []*Image, options *Li // compileImageFilters creates `filterFunc`s for the specified filters.
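The DiskUsage hunk above is an API break: callers now receive the summed size as a second return value, with layers shared between images counted only once. A sketch of an updated call site, assuming an existing *libimage.Runtime; printDiskUsage is a hypothetical helper:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

// printDiskUsage shows the new signature; previously this was
// `reports, err := runtime.DiskUsage(ctx)`.
func printDiskUsage(ctx context.Context, runtime *libimage.Runtime) error {
	reports, totalSize, err := runtime.DiskUsage(ctx)
	if err != nil {
		return err
	}
	for _, report := range reports {
		fmt.Printf("%s:%s\t%d bytes\n", report.Repository, report.Tag, report.Size)
	}
	fmt.Printf("total: %d bytes\n", totalSize)
	return nil
}
```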
The // required format is `key=value` with the following supported keys: -// after, since, before, containers, dangling, id, label, readonly, reference, intermediate +// +// after, since, before, containers, dangling, id, label, readonly, reference, intermediate func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) (map[string][]filterFunc, error) { logrus.Tracef("Parsing image filters %s", options.Filters) @@ -145,6 +146,9 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp case "id": filter = filterID(value) + case "digest": + filter = filterDigest(value) + case "intermediate": intermediate, err := r.bool(duplicate, key, value) if err != nil { @@ -382,6 +386,13 @@ func filterID(value string) filterFunc { } } +// filterDigest creates a digest filter for matching the specified value. +func filterDigest(value string) filterFunc { + return func(img *Image) (bool, error) { + return string(img.Digest()) == value, nil + } +} + // filterIntermediate creates an intermediate filter for images. An image is // considered to be an intermediate image if it is dangling (i.e., no tags) and // has no children (i.e., no other image depends on it). diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index b1866fa9b89..032dd139992 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -46,6 +46,8 @@ type Image struct { ociv1Image *ociv1.Image // Names() parsed into references. namesReferences []reference.Reference + // Calculating the Size() is expensive, so cache it. + size *int64 } } @@ -62,6 +64,7 @@ func (i *Image) reload() error { i.cached.completeInspectData = nil i.cached.ociv1Image = nil i.cached.namesReferences = nil + i.cached.size = nil return nil } @@ -470,14 +473,28 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma } if _, err := i.runtime.store.DeleteImage(i.ID(), true); handleError(err) != nil { + if errors.Is(err, storage.ErrImageUsedByContainer) { + err = fmt.Errorf("%w: consider listing external containers and force-removing image", err) + } return processedIDs, err } + report.Untagged = append(report.Untagged, i.Names()...) + if i.runtime.eventChannel != nil { + for _, name := range i.Names() { + i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageUntag}) + } + } if !hasChildren { report.Removed = true } + // Do not delete any parents if NoPrune is true + if options.NoPrune { + return processedIDs, nil + } + // Check if can remove the parent image. if parent == nil { return processedIDs, nil @@ -495,7 +512,6 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma if !danglingParent { return processedIDs, nil } - // Recurse into removing the parent. return parent.removeRecursive(ctx, rmMap, processedIDs, "", options) } @@ -762,7 +778,13 @@ func (i *Image) Unmount(force bool) error { // Size computes the size of the image layers and associated data.
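The "digest" filter added above plugs into the existing `key=value` syntax; note that filterDigest compares the full digest string, so a truncated value will not match. A sketch with a hypothetical helper:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

// listByDigest lists local images whose digest matches exactly.
func listByDigest(ctx context.Context, runtime *libimage.Runtime, digest string) error {
	images, err := runtime.ListImages(ctx, nil, &libimage.ListImagesOptions{
		Filters: []string{"digest=" + digest},
	})
	if err != nil {
		return err
	}
	for _, image := range images {
		fmt.Println(image.ID())
	}
	return nil
}
```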
func (i *Image) Size() (int64, error) { - return i.runtime.store.ImageSize(i.ID()) + if i.cached.size != nil { + return *i.cached.size, nil + } + + size, err := i.runtime.store.ImageSize(i.ID()) + i.cached.size = &size + return size, err } // HasDifferentDigestOptions allows for customizing the check if another diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go index 5da8df1bf94..c6632d9a23b 100644 --- a/vendor/github.com/containers/common/libimage/inspect.go +++ b/vendor/github.com/containers/common/libimage/inspect.go @@ -190,7 +190,7 @@ func (i *Image) Inspect(ctx context.Context, options *InspectOptions) (*ImageDat // NOTE: Health checks may be listed in the container config or // the config. data.HealthCheck = dockerManifest.ContainerConfig.Healthcheck - if data.HealthCheck == nil { + if data.HealthCheck == nil && dockerManifest.Config != nil { data.HealthCheck = dockerManifest.Config.Healthcheck } } diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go index 05f21531b0a..8c84dc41f4a 100644 --- a/vendor/github.com/containers/common/libimage/layer_tree.go +++ b/vendor/github.com/containers/common/libimage/layer_tree.go @@ -126,6 +126,17 @@ func (r *Runtime) layerTree() (*layerTree, error) { return &tree, nil } +// layersOf returns all storage layers of the specified image. +func (t *layerTree) layersOf(image *Image) []*storage.Layer { + var layers []*storage.Layer + node := t.node(image.TopLayer()) + for node != nil { + layers = append(layers, node.layer) + node = node.parent + } + return layers +} + // children returns the child images of parent. Child images are images with // either the same top layer as parent or parent being the true parent layer. // Furthermore, the history of the parent and child images must match with the diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go index 89faa46350e..593eef04bb1 100644 --- a/vendor/github.com/containers/common/libimage/load.go +++ b/vendor/github.com/containers/common/libimage/load.go @@ -99,7 +99,7 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( } // loadMultiImageDockerArchive loads the docker archive specified by ref. In -// case the path@reference notation was used, only the specifiec image will be +// case the path@reference notation was used, only the specified image will be // loaded. Otherwise, all images will be loaded. 
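Since Size() is now memoized on the Image (and the cache is invalidated by reload()), repeated calls are cheap. A small sketch under that assumption:

```go
package main

import "github.com/containers/common/libimage"

// imageSize is a hypothetical helper: the first Size() call walks the
// storage driver and caches the result, so the second call is served
// from the cache.
func imageSize(image *libimage.Image) (int64, error) {
	if _, err := image.Size(); err != nil { // computed and cached
		return 0, err
	}
	return image.Size() // returned from the cache
}
```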
func (r *Runtime) loadMultiImageDockerArchive(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) { // If we cannot stat the path, it either does not exist OR the correct diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index cec44f1a534..bf1738a33e6 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -452,6 +452,7 @@ func (m *ManifestList) Push(ctx context.Context, destination string, options *Ma ImageListSelection: options.ImageListSelection, Instances: options.Instances, ReportWriter: options.Writer, + Signers: options.Signers, SignBy: options.SignBy, SignPassphrase: options.SignPassphrase, SignBySigstorePrivateKeyFile: options.SignBySigstorePrivateKeyFile, diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 3bb4aff3c1b..0f3c1d7118b 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -15,6 +15,7 @@ import ( "github.com/containers/image/v5/image" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/signature/signer" is "github.com/containers/image/v5/storage" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports/alltransports" @@ -61,6 +62,7 @@ type PushOptions struct { ImageListSelection cp.ImageListSelection // set to either CopySystemImage, CopyAllImages, or CopySpecificImages Instances []digest.Digest // instances to copy if ImageListSelection == CopySpecificImages ReportWriter io.Writer // will be used to log the writing of the list and any blobs + Signers []*signer.Signer // if non-empty, asks for signatures to be added during the copy using the provided signers. SignBy string // fingerprint of GPG key to use to sign images SignPassphrase string // passphrase to use when signing with the key ID from SignBy. SignBySigstorePrivateKeyFile string // if non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path. 
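To populate the new Signers field, a caller builds *signer.Signer values with containers/image's signing packages. A sketch, assuming the signature/sigstore helper API (NewSigner, WithPrivateKeyFile) that ships alongside these vendored versions; keyPath and passphrase are placeholders, and the returned signer should be Close()d once the push completes:

```go
package main

import (
	"github.com/containers/common/libimage/manifests"
	"github.com/containers/image/v5/signature/signer"
	"github.com/containers/image/v5/signature/sigstore"
)

// pushOptionsWithSigner attaches a sigstore signer to the new field.
// The signer is returned as well so the caller can Close() it.
func pushOptionsWithSigner(keyPath string, passphrase []byte) (manifests.PushOptions, *signer.Signer, error) {
	s, err := sigstore.NewSigner(sigstore.WithPrivateKeyFile(keyPath, passphrase))
	if err != nil {
		return manifests.PushOptions{}, nil, err
	}
	return manifests.PushOptions{
		Signers: []*signer.Signer{s},
	}, s, nil
}
```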
@@ -87,11 +89,11 @@ func Create() List { func LoadFromImage(store storage.Store, image string) (string, List, error) { img, err := store.Image(image) if err != nil { - return "", nil, fmt.Errorf("error locating image %q for loading manifest list: %w", image, err) + return "", nil, fmt.Errorf("locating image %q for loading manifest list: %w", image, err) } manifestBytes, err := store.ImageBigData(img.ID, storage.ImageDigestManifestBigDataNamePrefix) if err != nil { - return "", nil, fmt.Errorf("error locating image %q for loading manifest list: %w", image, err) + return "", nil, fmt.Errorf("locating image %q for loading manifest list: %w", image, err) } manifestList, err := manifests.FromBlob(manifestBytes) if err != nil { @@ -103,10 +105,10 @@ func LoadFromImage(store storage.Store, image string) (string, List, error) { } instancesBytes, err := store.ImageBigData(img.ID, instancesData) if err != nil { - return "", nil, fmt.Errorf("error locating image %q for loading instance list: %w", image, err) + return "", nil, fmt.Errorf("locating image %q for loading instance list: %w", image, err) } if err := json.Unmarshal(instancesBytes, &list.instances); err != nil { - return "", nil, fmt.Errorf("error decoding instance list for image %q: %w", image, err) + return "", nil, fmt.Errorf("decoding instance list for image %q: %w", image, err) } list.instances[""] = img.ID return img.ID, list, err @@ -152,18 +154,18 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string, } return imageID, nil } - return "", fmt.Errorf("error creating image to hold manifest list: %w", err) + return "", fmt.Errorf("creating image to hold manifest list: %w", err) } // Reference returns an image reference for the composite image being built // in the list, or an error if the list has never been saved to a local image. func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) { if l.instances[""] == "" { - return nil, fmt.Errorf("error building reference to list: %w", ErrListImageUnknown) + return nil, fmt.Errorf("building reference to list: %w", ErrListImageUnknown) } s, err := is.Transport.ParseStoreReference(store, l.instances[""]) if err != nil { - return nil, fmt.Errorf("error creating ImageReference from image %q: %w", l.instances[""], err) + return nil, fmt.Errorf("creating ImageReference from image %q: %w", l.instances[""], err) } references := make([]types.ImageReference, 0, len(l.instances)) whichInstances := make([]digest.Digest, 0, len(l.instances)) @@ -187,7 +189,7 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in imageName := l.instances[instance] ref, err := alltransports.ParseImageName(imageName) if err != nil { - return nil, fmt.Errorf("error creating ImageReference from image %q: %w", imageName, err) + return nil, fmt.Errorf("creating ImageReference from image %q: %w", imageName, err) } references = append(references, ref) } @@ -199,7 +201,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push // Load the system signing policy. pushPolicy, err := signature.DefaultPolicy(options.SystemContext) if err != nil { - return nil, "", fmt.Errorf("error obtaining default signature policy: %w", err) + return nil, "", fmt.Errorf("obtaining default signature policy: %w", err) } // Override the settings for local storage to make sure that we can always read the source "image". 
@@ -207,7 +209,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push policyContext, err := signature.NewPolicyContext(pushPolicy) if err != nil { - return nil, "", fmt.Errorf("error creating new signature policy context: %w", err) + return nil, "", fmt.Errorf("creating new signature policy context: %w", err) } defer func() { if err2 := policyContext.Destroy(); err2 != nil { @@ -244,6 +246,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push DestinationCtx: options.SystemContext, ReportWriter: options.ReportWriter, RemoveSignatures: options.RemoveSignatures, + Signers: options.Signers, SignBy: options.SignBy, SignPassphrase: options.SignPassphrase, SignBySigstorePrivateKeyFile: options.SignBySigstorePrivateKeyFile, @@ -273,7 +276,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error) { src, err := ref.NewImageSource(ctx, sys) if err != nil { - return "", fmt.Errorf("error setting up to read manifest and configuration from %q: %w", transports.ImageName(ref), err) + return "", fmt.Errorf("setting up to read manifest and configuration from %q: %w", transports.ImageName(ref), err) } defer src.Close() @@ -288,13 +291,13 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag primaryManifestBytes, primaryManifestType, err := src.GetManifest(ctx, nil) if err != nil { - return "", fmt.Errorf("error reading manifest from %q: %w", transports.ImageName(ref), err) + return "", fmt.Errorf("reading manifest from %q: %w", transports.ImageName(ref), err) } if manifest.MIMETypeIsMultiImage(primaryManifestType) { lists, err := manifests.FromBlob(primaryManifestBytes) if err != nil { - return "", fmt.Errorf("error parsing manifest list in %q: %w", transports.ImageName(ref), err) + return "", fmt.Errorf("parsing manifest list in %q: %w", transports.ImageName(ref), err) } if all { for i, instance := range lists.OCIv1().Manifests { @@ -318,11 +321,11 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag } else { list, err := manifest.ListFromBlob(primaryManifestBytes, primaryManifestType) if err != nil { - return "", fmt.Errorf("error parsing manifest list in %q: %w", transports.ImageName(ref), err) + return "", fmt.Errorf("parsing manifest list in %q: %w", transports.ImageName(ref), err) } instanceDigest, err := list.ChooseInstance(sys) if err != nil { - return "", fmt.Errorf("error selecting image from manifest list in %q: %w", transports.ImageName(ref), err) + return "", fmt.Errorf("selecting image from manifest list in %q: %w", transports.ImageName(ref), err) } added := false for i, instance := range lists.OCIv1().Manifests { @@ -364,11 +367,11 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag if instanceInfo.OS == "" || instanceInfo.Architecture == "" { img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, instanceInfo.instanceDigest)) if err != nil { - return "", fmt.Errorf("error reading configuration blob from %q: %w", transports.ImageName(ref), err) + return "", fmt.Errorf("reading configuration blob from %q: %w", transports.ImageName(ref), err) } config, err := img.OCIConfig(ctx) if err != nil { - return "", fmt.Errorf("error reading info about config blob from %q: %w", transports.ImageName(ref), err) + return "", fmt.Errorf("reading info about config blob from %q: %w", 
transports.ImageName(ref), err) } if instanceInfo.OS == "" { instanceInfo.OS = config.OS @@ -382,12 +385,12 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag } manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest) if err != nil { - return "", fmt.Errorf("error reading manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err) + return "", fmt.Errorf("reading manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err) } if instanceInfo.instanceDigest == nil { manifestDigest, err = manifest.Digest(manifestBytes) if err != nil { - return "", fmt.Errorf("error computing digest of manifest from %q: %w", transports.ImageName(ref), err) + return "", fmt.Errorf("computing digest of manifest from %q: %w", transports.ImageName(ref), err) } instanceInfo.instanceDigest = &manifestDigest instanceInfo.Size = int64(len(manifestBytes)) @@ -396,7 +399,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag } err = l.List.AddInstance(*instanceInfo.instanceDigest, instanceInfo.Size, manifestType, instanceInfo.OS, instanceInfo.Architecture, instanceInfo.OSVersion, instanceInfo.OSFeatures, instanceInfo.Variant, instanceInfo.Features, instanceInfo.Annotations) if err != nil { - return "", fmt.Errorf("error adding instance with digest %q: %w", *instanceInfo.instanceDigest, err) + return "", fmt.Errorf("adding instance with digest %q: %w", *instanceInfo.instanceDigest, err) } if _, ok := l.instances[*instanceInfo.instanceDigest]; !ok { l.instances[*instanceInfo.instanceDigest] = transports.ImageName(ref) diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go index be2d30206d7..bb3cdbc7c0d 100644 --- a/vendor/github.com/containers/common/libimage/normalize.go +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -18,7 +18,7 @@ func NormalizeName(name string) (reference.Named, error) { // NOTE: this code is in symmetrie with containers/image/pkg/shortnames. ref, err := reference.Parse(name) if err != nil { - return nil, fmt.Errorf("error normalizing name %q: %w", name, err) + return nil, fmt.Errorf("normalizing name %q: %w", name, err) } named, ok := ref.(reference.Named) diff --git a/vendor/github.com/containers/common/libimage/platform.go b/vendor/github.com/containers/common/libimage/platform.go index 736a193f6fa..06c15ee648f 100644 --- a/vendor/github.com/containers/common/libimage/platform.go +++ b/vendor/github.com/containers/common/libimage/platform.go @@ -6,6 +6,7 @@ import ( "runtime" "github.com/containerd/containerd/platforms" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -20,9 +21,18 @@ const ( ) // NormalizePlatform normalizes (according to the OCI spec) the specified os, -// arch and variant. If left empty, the individual item will not be normalized. +// arch and variant. If left empty, the individual item will be normalized. 
func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) { - rawPlatform := toPlatformString(rawOS, rawArch, rawVariant) + platformSpec := v1.Platform{ + OS: rawOS, + Architecture: rawArch, + Variant: rawVariant, + } + normalizedSpec := platforms.Normalize(platformSpec) + if normalizedSpec.Variant == "" && rawVariant != "" { + normalizedSpec.Variant = rawVariant + } + rawPlatform := toPlatformString(normalizedSpec.OS, normalizedSpec.Architecture, normalizedSpec.Variant) normalizedPlatform, err := platforms.Parse(rawPlatform) if err != nil { logrus.Debugf("Error normalizing platform: %v", err) @@ -38,7 +48,7 @@ func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant str arch = normalizedPlatform.Architecture } variant = rawVariant - if rawVariant != "" { + if rawVariant != "" || (rawVariant == "" && normalizedPlatform.Variant != "") { variant = normalizedPlatform.Variant } return os, arch, variant @@ -59,10 +69,13 @@ func toPlatformString(os, arch, variant string) string { // Checks whether the image matches the specified platform. // Returns -// * 1) a matching error that can be used for logging (or returning) what does not match -// * 2) a bool indicating whether architecture, os or variant were set (some callers need that to decide whether they need to throw an error) -// * 3) a fatal error that occurred prior to check for matches (e.g., storage errors etc.) +// - 1) a matching error that can be used for logging (or returning) what does not match +// - 2) a bool indicating whether architecture, os or variant were set (some callers need that to decide whether they need to throw an error) +// - 3) a fatal error that occurred prior to check for matches (e.g., storage errors etc.) func (i *Image) matchesPlatform(ctx context.Context, os, arch, variant string) (error, bool, error) { + if err := i.isCorrupted(""); err != nil { + return err, false, nil + } inspectInfo, err := i.inspectInfo(ctx) if err != nil { return nil, false, fmt.Errorf("inspecting image: %w", err) @@ -83,5 +96,5 @@ func (i *Image) matchesPlatform(ctx context.Context, os, arch, variant string) ( return nil, customPlatform, nil } - return fmt.Errorf("image platform (%s) does not match the expected platform (%s)", fromImage, expected), customPlatform, nil + return fmt.Errorf("image platform (%s) does not match the expected platform (%s)", platforms.Format(fromImage), platforms.Format(expected)), customPlatform, nil } diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 86c9ebef156..955132868ff 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -163,7 +163,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP for _, name := range pulledImages { image, _, err := r.LookupImage(name, nil) if err != nil { - return nil, fmt.Errorf("error locating pulled image %q name in containers storage: %w", name, err) + return nil, fmt.Errorf("locating pulled image %q name in containers storage: %w", name, err) } // Note that we can ignore the 2nd return value here. 
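The NormalizePlatform rewrite above routes the raw values through platforms.Normalize before parsing, so non-OCI spellings map to their OCI equivalents. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/containers/common/libimage"
)

func main() {
	// platforms.Normalize maps e.g. x86_64 -> amd64 and aarch64 -> arm64;
	// a variant that normalization drops is restored from the raw input.
	os, arch, variant := libimage.NormalizePlatform("linux", "x86_64", "")
	fmt.Println(os, arch, variant) // "linux amd64"
}
```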
Some @@ -232,7 +232,7 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, imageName = storageName case ociArchiveTransport.Transport.Name(): - manifestDescriptor, err := ociArchiveTransport.LoadManifestDescriptor(ref) + manifestDescriptor, err := ociArchiveTransport.LoadManifestDescriptorWithContext(r.SystemContext(), ref) if err != nil { return nil, err } @@ -318,7 +318,7 @@ func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Conte for _, destName := range destNames { destRef, err := storageTransport.Transport.ParseStoreReference(r.store, destName) if err != nil { - return nil, nil, fmt.Errorf("error parsing dest reference name %#v: %w", destName, err) + return nil, nil, fmt.Errorf("parsing dest reference name %#v: %w", destName, err) } references = append(references, destRef) } @@ -399,7 +399,7 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference } tagged, err := reference.WithTag(named, tag) if err != nil { - return nil, fmt.Errorf("error creating tagged reference (name %s, tag %s): %w", named.String(), tag, err) + return nil, fmt.Errorf("creating tagged reference (name %s, tag %s): %w", named.String(), tag, err) } pulled, err := r.copySingleImageFromRegistry(ctx, tagged.String(), pullPolicy, options) if err != nil { diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 6030a179b14..7cbf9c95eb6 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -608,6 +608,8 @@ type RemoveImagesOptions struct { // much space was freed. However, computing the size of an image is // comparatively expensive, so it is made optional. WithSize bool + // NoPrune will not remove dangling images + NoPrune bool } // RemoveImages removes images specified by names. If no names are specified, @@ -653,7 +655,6 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem toDelete := []string{} // Look up images in the local containers storage and fill out // toDelete and the deleteMap. 
- switch { case len(names) > 0: // prepare lookupOptions diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go index 0b58055b42a..b0e2ca6fbbd 100644 --- a/vendor/github.com/containers/common/libimage/search.go +++ b/vendor/github.com/containers/common/libimage/search.go @@ -281,7 +281,7 @@ func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registr } tags, err := registryTransport.GetRepositoryTags(ctx, sys, imageRef) if err != nil { - return nil, fmt.Errorf("error getting repository tags: %v", err) + return nil, fmt.Errorf("getting repository tags: %v", err) } limit := searchMaxQueries if len(tags) < limit { diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go index da8fa31c624..e367f9ad3b7 100644 --- a/vendor/github.com/containers/common/libnetwork/types/const.go +++ b/vendor/github.com/containers/common/libnetwork/types/const.go @@ -40,6 +40,7 @@ const ( MTUOption = "mtu" ModeOption = "mode" IsolateOption = "isolate" + MetricOption = "metric" ) type NetworkBackend string diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go index de865537738..b8804bf6b3e 100644 --- a/vendor/github.com/containers/common/libnetwork/types/network.go +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -9,7 +9,9 @@ import ( type ContainerNetwork interface { // NetworkCreate will take a partial filled Network and fill the // missing fields. It creates the Network and returns the full Network. - NetworkCreate(Network) (Network, error) + NetworkCreate(Network, *NetworkCreateOptions) (Network, error) + // NetworkUpdate takes a network name or ID and updates the network's DNS servers. + NetworkUpdate(nameOrID string, options NetworkUpdateOptions) error // NetworkRemove will remove the Network with the given name or ID. NetworkRemove(nameOrID string) error // NetworkList will return all known Networks. Optionally you can @@ -54,8 +56,12 @@ type Network struct { // to public or other Networks. Internal bool `json:"internal"` // DNSEnabled is whether name resolution is active for container on - // this Network. + // this Network. Only supported with the bridge driver. DNSEnabled bool `json:"dns_enabled"` + // List of custom DNS servers for podman's DNS resolver at the network level; + // all containers attached to this network will use the resolvers + // configured at the network level. + NetworkDNSServers []string `json:"network_dns_servers,omitempty"` // Labels is a set of key-value labels that have been applied to the // Network. Labels map[string]string `json:"labels,omitempty"` @@ -66,6 +72,14 @@ type Network struct { IPAMOptions map[string]string `json:"ipam_options,omitempty"` } +// NetworkUpdateOptions are options for updating an existing network's DNS servers. +type NetworkUpdateOptions struct { + // List of custom DNS servers for podman's DNS resolver. + // Priority order will be kept as defined by the user in the configuration. + AddDNSServers []string `json:"add_dns_servers,omitempty"` + RemoveDNSServers []string `json:"remove_dns_servers,omitempty"` +} + // IPNet is used as custom net.IPNet type to add Marshal/Unmarshal methods. type IPNet struct { net.IPNet @@ -199,6 +213,7 @@ type NetAddress struct { // PerNetworkOptions are options which should be set on a per network basis. type PerNetworkOptions struct { // StaticIPs for this container. Optional.
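From a caller's side, the ContainerNetwork interface changes above look roughly as follows; a hypothetical sketch (the network name and resolver address are made up):

```go
package main

import (
	nettypes "github.com/containers/common/libnetwork/types"
)

// addNetworkResolvers creates a network, tolerating pre-existing ones
// via the new options argument, then attaches a network-scoped resolver.
func addNetworkResolvers(backend nettypes.ContainerNetwork) error {
	_, err := backend.NetworkCreate(
		nettypes.Network{Name: "mynet", DNSEnabled: true},
		&nettypes.NetworkCreateOptions{IgnoreIfExists: true},
	)
	if err != nil {
		return err
	}
	return backend.NetworkUpdate("mynet", nettypes.NetworkUpdateOptions{
		AddDNSServers: []string{"192.168.0.1"},
	})
}
```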
+ // swagger:type []string StaticIPs []net.IP `json:"static_ips,omitempty"` // Aliases contains a list of names which the dns server should resolve // to this container. Should only be set when DNSEnabled is true on the Network. @@ -207,6 +222,7 @@ type PerNetworkOptions struct { // Optional. Aliases []string `json:"aliases,omitempty"` // StaticMac for this container. Optional. + // swagger:strfmt string StaticMAC HardwareAddr `json:"static_mac,omitempty"` // InterfaceName for this container. Required in the backend. // Optional in the frontend. Will be filled with ethX (where X is a integer) when empty. @@ -224,6 +240,9 @@ type NetworkOptions struct { // Networks contains all networks with the PerNetworkOptions. // The map should contain at least one element. Networks map[string]PerNetworkOptions `json:"networks"` + // List of custom DNS server for podman's DNS resolver. + // Priority order will be kept as defined by user in the configuration. + DNSServers []string `json:"dns_servers,omitempty"` } // PortMapping is one or more ports that will be mapped into the container. @@ -280,3 +299,8 @@ type TeardownOptions struct { // FilterFunc can be passed to NetworkList to filter the networks. type FilterFunc func(Network) bool + +type NetworkCreateOptions struct { + // IgnoreIfExists if true, do not fail if the network already exists + IgnoreIfExists bool +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go index 9c93618df5d..7c9c45042c4 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go @@ -8,7 +8,6 @@ import ( "context" "errors" "fmt" - "io/ioutil" "math" "os" "path/filepath" @@ -143,9 +142,9 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) basePath := cgroupRoot + userSlice controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath) } - controllersFileBytes, err := ioutil.ReadFile(controllersFile) + controllersFileBytes, err := os.ReadFile(controllersFile) if err != nil { - return nil, fmt.Errorf("failed while reading controllers for cgroup v2 from %q: %w", controllersFile, err) + return nil, fmt.Errorf("failed while reading controllers for cgroup v2: %w", err) } for _, controllerName := range strings.Fields(string(controllersFileBytes)) { c := controller{ @@ -264,7 +263,7 @@ func (c *CgroupControl) initialize() (err error) { }() if c.cgroup2 { if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.path)); err != nil { - return fmt.Errorf("error creating cgroup path %s: %w", c.path, err) + return fmt.Errorf("creating cgroup path %s: %w", c.path, err) } } for name, handler := range handlers { @@ -285,7 +284,7 @@ func (c *CgroupControl) initialize() (err error) { } path := c.getCgroupv1Path(ctr.name) if err := os.MkdirAll(path, 0o755); err != nil { - return fmt.Errorf("error creating cgroup path for %s: %w", ctr.name, err) + return fmt.Errorf("creating cgroup path for %s: %w", ctr.name, err) } } } @@ -294,7 +293,7 @@ func (c *CgroupControl) initialize() (err error) { } func readFileAsUint64(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } @@ -310,7 +309,7 @@ func readFileAsUint64(path string) (uint64, error) { } func readFileByKeyAsUint64(path, key string) (uint64, error) { - content, err := ioutil.ReadFile(path) + content, err := os.ReadFile(path) if err != nil { return 0, err } @@ -432,7 +431,7 @@ func 
(c *CgroupControl) CreateSystemdUnit(path string) error { // GetUserConnection returns an user connection to D-BUS func GetUserConnection(uid int) (*systemdDbus.Conn, error) { return systemdDbus.NewConnection(func() (*dbus.Conn, error) { - return dbusAuthConnection(uid, dbus.SessionBusPrivate) + return dbusAuthConnection(uid, dbus.SessionBusPrivateNoAutoStartup) }) } @@ -533,7 +532,7 @@ func (c *CgroupControl) AddPid(pid int) error { if c.cgroup2 { p := filepath.Join(cgroupRoot, c.path, "cgroup.procs") - if err := ioutil.WriteFile(p, pidString, 0o644); err != nil { + if err := os.WriteFile(p, pidString, 0o644); err != nil { return fmt.Errorf("write %s: %w", p, err) } return nil @@ -556,7 +555,7 @@ func (c *CgroupControl) AddPid(pid int) error { continue } p := filepath.Join(c.getCgroupv1Path(n), "tasks") - if err := ioutil.WriteFile(p, pidString, 0o644); err != nil { + if err := os.WriteFile(p, pidString, 0o644); err != nil { return fmt.Errorf("write %s: %w", p, err) } } diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go index 45f7bde2975..03d85750d1e 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go @@ -8,7 +8,6 @@ import ( "context" "errors" "fmt" - "io/ioutil" "math" "os" "path/filepath" @@ -96,9 +95,9 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) basePath := cgroupRoot + userSlice controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath) } - controllersFileBytes, err := ioutil.ReadFile(controllersFile) + controllersFileBytes, err := os.ReadFile(controllersFile) if err != nil { - return nil, fmt.Errorf("failed while reading controllers for cgroup v2 from %q: %w", controllersFile, err) + return nil, fmt.Errorf("failed while reading controllers for cgroup v2: %w", err) } for _, controllerName := range strings.Fields(string(controllersFileBytes)) { c := controller{ @@ -217,7 +216,7 @@ func (c *CgroupControl) initialize() (err error) { }() if c.cgroup2 { if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.config.Path)); err != nil { - return fmt.Errorf("error creating cgroup path %s: %w", c.config.Path, err) + return fmt.Errorf("creating cgroup path %s: %w", c.config.Path, err) } } for name, handler := range handlers { @@ -238,7 +237,7 @@ func (c *CgroupControl) initialize() (err error) { } path := c.getCgroupv1Path(ctr.name) if err := os.MkdirAll(path, 0o755); err != nil { - return fmt.Errorf("error creating cgroup path for %s: %w", ctr.name, err) + return fmt.Errorf("creating cgroup path for %s: %w", ctr.name, err) } } } @@ -247,7 +246,7 @@ func (c *CgroupControl) initialize() (err error) { } func readFileAsUint64(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } @@ -263,7 +262,7 @@ func readFileAsUint64(path string) (uint64, error) { } func readFileByKeyAsUint64(path, key string) (uint64, error) { - content, err := ioutil.ReadFile(path) + content, err := os.ReadFile(path) if err != nil { return 0, err } @@ -516,7 +515,7 @@ func (c *CgroupControl) AddPid(pid int) error { continue } p := filepath.Join(c.getCgroupv1Path(n), "tasks") - if err := ioutil.WriteFile(p, pidString, 0o644); err != nil { + if err := os.WriteFile(p, pidString, 0o644); err != nil { return fmt.Errorf("write %s: %w", p, err) } } diff --git 
a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go index 419bc4ec3c9..3a86122392a 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go @@ -7,7 +7,6 @@ import ( "bufio" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -80,7 +79,7 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) { } s := st.Sys() if s == nil { - return false, fmt.Errorf("error stat cgroup path %s", cgroupPath) + return false, fmt.Errorf("stat cgroup path %s", cgroupPath) } if int(s.(*syscall.Stat_t).Uid) != uid { @@ -99,12 +98,12 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) { func rmDirRecursively(path string) error { killProcesses := func(signal syscall.Signal) { if signal == unix.SIGKILL { - if err := ioutil.WriteFile(filepath.Join(path, "cgroup.kill"), []byte("1"), 0o600); err == nil { + if err := os.WriteFile(filepath.Join(path, "cgroup.kill"), []byte("1"), 0o600); err == nil { return } } // kill all the processes that are still part of the cgroup - if procs, err := ioutil.ReadFile(filepath.Join(path, "cgroup.procs")); err == nil { + if procs, err := os.ReadFile(filepath.Join(path, "cgroup.procs")); err == nil { for _, pidS := range strings.Split(string(procs), "\n") { if pid, err := strconv.Atoi(pidS); err == nil { _ = unix.Kill(pid, signal) @@ -116,7 +115,7 @@ func rmDirRecursively(path string) error { if err := os.Remove(path); err == nil || errors.Is(err, os.ErrNotExist) { return nil } - entries, err := ioutil.ReadDir(path) + entries, err := os.ReadDir(path) if err != nil { return err } diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids.go b/vendor/github.com/containers/common/pkg/cgroups/pids.go index 1cb7ced8257..76e983ea967 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/pids.go +++ b/vendor/github.com/containers/common/pkg/cgroups/pids.go @@ -5,7 +5,7 @@ package cgroups import ( "fmt" - "io/ioutil" + "os" "path/filepath" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -31,7 +31,7 @@ func (c *pidHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { } p := filepath.Join(PIDRoot, "pids.max") - return ioutil.WriteFile(p, []byte(fmt.Sprintf("%d\n", res.Pids.Limit)), 0o644) + return os.WriteFile(p, []byte(fmt.Sprintf("%d\n", res.Pids.Limit)), 0o644) } // Create the cgroup diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd.go b/vendor/github.com/containers/common/pkg/cgroups/systemd.go index 118fa97a161..80a7bde288a 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/systemd.go +++ b/vendor/github.com/containers/common/pkg/cgroups/systemd.go @@ -53,18 +53,20 @@ func systemdCreate(path string, c *systemdDbus.Conn) error { } /* - systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that - has the following license: - Copyright The containerd Authors. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
+systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that +has the following license: +Copyright The containerd Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ func systemdDestroyConn(path string, c *systemdDbus.Conn) error { name := filepath.Base(path) diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go index 92b87fdaf8c..e8107604dee 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go @@ -108,22 +108,22 @@ func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Con } /* - systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that - has the following license: +systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that +has the following license: - Copyright The containerd Authors. +Copyright The containerd Authors. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ func systemdDestroyConn(path string, c *systemdDbus.Conn) error { name := filepath.Base(path) diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils.go b/vendor/github.com/containers/common/pkg/cgroups/utils.go index c7f86d7e118..8ade67878d4 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/utils.go +++ b/vendor/github.com/containers/common/pkg/cgroups/utils.go @@ -4,7 +4,6 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -24,9 +23,9 @@ func readAcct(ctr *CgroupControl, name string) (uint64, error) { func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) { p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) - data, err := ioutil.ReadFile(p) + data, err := os.ReadFile(p) if err != nil { - return nil, fmt.Errorf("reading %s: %w", p, err) + return nil, err } r := []uint64{} for _, s := range strings.Split(string(data), " ") { @@ -54,7 +53,7 @@ func GetSystemCPUUsage() (uint64, error) { return readFileAsUint64(p) } - files, err := ioutil.ReadDir(cgroupRoot) + files, err := os.ReadDir(cgroupRoot) if err != nil { return 0, err } @@ -90,9 +89,14 @@ func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { if cgroupv2 { parentPath = fmt.Sprintf("%s.effective", parentPath) } - data, err := ioutil.ReadFile(parentPath) + data, err := os.ReadFile(parentPath) if err != nil { - return nil, fmt.Errorf("open %s: %w", path, err) + // if the file doesn't exist, it is likely that the cpuset controller + // is not enabled in the kernel. + if os.IsNotExist(err) { + return nil, nil + } + return nil, err } if strings.Trim(string(data), "\n") != "" { return data, nil @@ -101,7 +105,7 @@ func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { if err != nil { return nil, err } - if err := ioutil.WriteFile(path, data, 0o644); err != nil { + if err := os.WriteFile(path, data, 0o644); err != nil { return nil, fmt.Errorf("write %s: %w", path, err) } return data, nil @@ -121,7 +125,7 @@ func createCgroupv2Path(path string) (deferredError error) { if !strings.HasPrefix(path, cgroupRoot+"/") { return fmt.Errorf("invalid cgroup path %s", path) } - content, err := ioutil.ReadFile(cgroupRoot + "/cgroup.controllers") + content, err := os.ReadFile(cgroupRoot + "/cgroup.controllers") if err != nil { return err } @@ -149,7 +153,7 @@ func createCgroupv2Path(path string) (deferredError error) { // We enable the controllers for all the path components except the last one. It is not allowed to add // PIDs if there are already enabled controllers. 
if i < len(elements[3:])-1 { - if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0o755); err != nil { + if err := os.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0o755); err != nil { return err } } @@ -169,7 +173,7 @@ func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) { } if err := os.MkdirAll(cPath, 0o755); err != nil { - return false, fmt.Errorf("error creating cgroup for %s: %w", controller, err) + return false, fmt.Errorf("creating cgroup for %s: %w", controller, err) } return true, nil } diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index 3d90268cd45..5e965402c28 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -7,6 +7,7 @@ import ( "os" "os/exec" "path/filepath" + "runtime" "sort" "strings" "sync" @@ -27,6 +28,8 @@ const ( _configPath = "containers/containers.conf" // UserOverrideContainersConfig holds the containers config path overridden by the rootless user UserOverrideContainersConfig = ".config/" + _configPath + // Token prefix for looking for helper binary under $BINDIR + bindirPrefix = "$BINDIR" ) // RuntimeStateStore is a constant indicating which state store implementation @@ -191,6 +194,9 @@ type ContainersConfig struct { // performance implications. PrepareVolumeOnCreate bool `toml:"prepare_volume_on_create,omitempty"` + // ReadOnly causes engine to run all containers with root file system mounted read-only + ReadOnly bool `toml:"read_only,omitempty"` + // SeccompProfile is the seccomp.json profile path which is used as the // default for the runtime. SeccompProfile string `toml:"seccomp_profile,omitempty"` @@ -211,6 +217,7 @@ type ContainersConfig struct { UserNS string `toml:"userns,omitempty"` // UserNSSize how many UIDs to allocate for automatically created UserNS + // Deprecated: no user of this field is known. UserNSSize int `toml:"userns_size,omitempty,omitzero"` } @@ -234,6 +241,10 @@ type EngineConfig struct { // The first path pointing to a valid file will be used. ConmonPath []string `toml:"conmon_path,omitempty"` + // ConmonRsPath is the path to the Conmon-rs binary used for managing containers. + // The first path pointing to a valid file will be used. + ConmonRsPath []string `toml:"conmonrs_path,omitempty"` + // CompatAPIEnforceDockerHub enforces using docker.io for completing // short names in Podman's compatibility REST API. Note that this will // ignore unqualified-search-registries and short-name aliases defined @@ -265,6 +276,11 @@ type EngineConfig struct { // EventsLogger determines where events should be logged. EventsLogger string `toml:"events_logger,omitempty"` + // EventsContainerCreateInspectData creates a more verbose + // container-create event which includes a JSON payload with detailed + // information about the container. + EventsContainerCreateInspectData bool `toml:"events_container_create_inspect_data,omitempty"` + // graphRoot internal stores the location of the graphroot graphRoot string @@ -351,6 +367,9 @@ type EngineConfig struct { // OCIRuntimes are the set of configured OCI runtimes (default is runc). OCIRuntimes map[string][]string `toml:"runtimes,omitempty"` + // PlatformToOCIRuntime requests specific OCI runtime for a specified platform of image. 
+ PlatformToOCIRuntime map[string]string `toml:"platform_to_oci_runtime,omitempty"` + // PodExitPolicy determines the behaviour when the last container of a pod exits. PodExitPolicy PodExitPolicy `toml:"pod_exit_policy,omitempty"` @@ -375,6 +394,9 @@ type EngineConfig struct { // ServiceDestinations mapped by service Names ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"` + // SSHConfig contains the ssh config file path if not the default + SSHConfig string `toml:"ssh_config,omitempty"` + // RuntimePath is the path to OCI runtime binary for launching containers. // The first path pointing to a valid file will be used This is used only // when there are no OCIRuntime/OCIRuntimes defined. It is used only to be @@ -447,6 +469,13 @@ type EngineConfig struct { // may not be by other drivers. VolumePath string `toml:"volume_path,omitempty"` + // VolumePluginTimeout sets the default timeout, in seconds, for + // operations that must contact a volume plugin. Plugins are external + // programs accessed via REST API; this sets a timeout for requests to + // that API. + // A value of 0 is treated as no timeout. + VolumePluginTimeout uint `toml:"volume_plugin_timeout,omitempty,omitzero"` + // VolumePlugins is a set of plugins that can be used as the backend for // Podman named volumes. Each volume is specified as a name (what Podman // will refer to the plugin as) mapped to a path, which must point to a @@ -562,6 +591,7 @@ type SecretConfig struct { // ConfigMapConfig represents the "configmap" TOML config table // // revive does not like the name because the package is already called config +// //nolint:revive type ConfigMapConfig struct { // Driver specifies the configmap driver to use. @@ -596,6 +626,19 @@ type Destination struct { // Identity file with ssh key, optional Identity string `toml:"identity,omitempty"` + + // isMachine describes if the remote destination is a machine. + IsMachine bool `toml:"is_machine,omitempty"` +} + +// Consumes container image's os and arch and returns if any dedicated runtime was +// configured otherwise returns default runtime. +func (c *EngineConfig) ImagePlatformToRuntime(os string, arch string) string { + platformString := os + "/" + arch + if val, ok := c.PlatformToOCIRuntime[platformString]; ok { + return val + } + return c.OCIRuntime } // NewConfig creates a new Config. It starts with an empty config and, if @@ -808,9 +851,21 @@ func (c *Config) Validate() error { return nil } +// URI returns the URI Path to the machine image +func (m *MachineConfig) URI() string { + uri := m.Image + for _, val := range []string{"$ARCH", "$arch"} { + uri = strings.Replace(uri, val, runtime.GOARCH, 1) + } + for _, val := range []string{"$OS", "$os"} { + uri = strings.Replace(uri, val, runtime.GOOS, 1) + } + return uri +} + func (c *EngineConfig) findRuntime() string { // Search for crun first followed by runc, kata, runsc - for _, name := range []string{"crun", "runc", "runj", "kata", "runsc"} { + for _, name := range []string{"crun", "runc", "runj", "kata", "runsc", "ocijail"} { for _, v := range c.OCIRuntimes[name] { if _, err := os.Stat(v); err == nil { return name @@ -912,8 +967,11 @@ func (c *NetworkConfig) Validate() error { // to first (version) matching conmon binary. If non is found, we try // to do a path lookup of "conmon". 
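The ImagePlatformToRuntime lookup added above can be exercised as below; the wasm mapping is a hypothetical platform_to_oci_runtime entry in containers.conf pairing "wasip1/wasm" with a wasm-capable runtime:

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

// chooseRuntime is a sketch of the new lookup: a platform with a
// dedicated entry in PlatformToOCIRuntime gets that runtime; anything
// else falls back to the default OCIRuntime.
func chooseRuntime(cfg *config.Config) {
	fmt.Println(cfg.Engine.ImagePlatformToRuntime("wasip1", "wasm"))
	fmt.Println(cfg.Engine.ImagePlatformToRuntime("linux", "amd64"))
}
```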
func (c *Config) FindConmon() (string, error) { - foundOutdatedConmon := false - for _, path := range c.Engine.ConmonPath { + return findConmonPath(c.Engine.ConmonPath, "conmon") +} + +func findConmonPath(paths []string, binaryName string) (string, error) { + for _, path := range paths { stat, err := os.Stat(path) if err != nil { continue @@ -921,33 +979,25 @@ func (c *Config) FindConmon() (string, error) { if stat.IsDir() { continue } - if err := probeConmon(path); err != nil { - logrus.Warnf("Conmon at %s invalid: %v", path, err) - foundOutdatedConmon = true - continue - } logrus.Debugf("Using conmon: %q", path) return path, nil } // Search the $PATH as last fallback - if path, err := exec.LookPath("conmon"); err == nil { - if err := probeConmon(path); err != nil { - logrus.Warnf("Conmon at %s is invalid: %v", path, err) - foundOutdatedConmon = true - } else { - logrus.Debugf("Using conmon from $PATH: %q", path) - return path, nil - } - } - - if foundOutdatedConmon { - return "", fmt.Errorf("please update to v%d.%d.%d or later: %w", - _conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion, ErrConmonOutdated) + if path, err := exec.LookPath(binaryName); err == nil { + logrus.Debugf("Using conmon from $PATH: %q", path) + return path, nil } return "", fmt.Errorf("could not find a working conmon binary (configured options: %v: %w)", - c.Engine.ConmonPath, ErrInvalidArg) + paths, ErrInvalidArg) +} + +// FindConmonRs iterates over (*Config).ConmonRsPath and returns the path +// to first (version) matching conmonrs binary. If none is found, we try +// to do a path lookup of "conmonrs". +func (c *Config) FindConmonRs() (string, error) { + return findConmonPath(c.Engine.ConmonRsPath, "conmonrs") } // GetDefaultEnv returns the environment variables for the container.
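With the lookup factored into findConmonPath, conmon and the new conmon-rs resolve the same way: configured paths first, then $PATH. A sketch of both lookups side by side:

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

// resolveRuntimeMonitors prints whichever monitor binaries are found.
func resolveRuntimeMonitors(cfg *config.Config) {
	if path, err := cfg.FindConmon(); err == nil {
		fmt.Println("conmon:", path)
	}
	if path, err := cfg.FindConmonRs(); err == nil {
		fmt.Println("conmonrs:", path)
	}
}
```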
@@ -994,10 +1044,11 @@ func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []s // Device parses device mapping string to a src, dest & permissions string // Valid values for device looklike: -// '/dev/sdc" -// '/dev/sdc:/dev/xvdc" -// '/dev/sdc:/dev/xvdc:rwm" -// '/dev/sdc:rm" +// +// '/dev/sdc" +// '/dev/sdc:/dev/xvdc" +// '/dev/sdc:/dev/xvdc:rwm" +// '/dev/sdc:rm" func Device(device string) (src, dst, permissions string, err error) { permissions = "rwm" split := strings.Split(device, ":") @@ -1195,38 +1246,65 @@ func Reload() (*Config, error) { return defConfig() } -func (c *Config) ActiveDestination() (uri, identity string, err error) { +func (c *Config) ActiveDestination() (uri, identity string, machine bool, err error) { if uri, found := os.LookupEnv("CONTAINER_HOST"); found { if v, found := os.LookupEnv("CONTAINER_SSHKEY"); found { identity = v } - return uri, identity, nil + return uri, identity, false, nil } connEnv := os.Getenv("CONTAINER_CONNECTION") switch { case connEnv != "": d, found := c.Engine.ServiceDestinations[connEnv] if !found { - return "", "", fmt.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv) + return "", "", false, fmt.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv) } - return d.URI, d.Identity, nil + return d.URI, d.Identity, d.IsMachine, nil case c.Engine.ActiveService != "": d, found := c.Engine.ServiceDestinations[c.Engine.ActiveService] if !found { - return "", "", fmt.Errorf("%q service destination not found", c.Engine.ActiveService) + return "", "", false, fmt.Errorf("%q service destination not found", c.Engine.ActiveService) } - return d.URI, d.Identity, nil + return d.URI, d.Identity, d.IsMachine, nil case c.Engine.RemoteURI != "": - return c.Engine.RemoteURI, c.Engine.RemoteIdentity, nil + return c.Engine.RemoteURI, c.Engine.RemoteIdentity, false, nil + } + return "", "", false, errors.New("no service destination configured") +} + +var ( + bindirFailed = false + bindirCached = "" +) + +func findBindir() string { + if bindirCached != "" || bindirFailed { + return bindirCached + } + execPath, err := os.Executable() + if err == nil { + // Resolve symbolic links to find the actual binary file path. + execPath, err = filepath.EvalSymlinks(execPath) } - return "", "", errors.New("no service destination configured") + if err != nil { + // If failed to find executable (unlikely to happen), warn about it. + // The bindirFailed flag will track this, so we only warn once. + logrus.Warnf("Failed to find $BINDIR: %v", err) + bindirFailed = true + return "" + } + bindirCached = filepath.Dir(execPath) + return bindirCached } // FindHelperBinary will search the given binary name in the configured directories. // If searchPATH is set to true it will also search in $PATH. func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) { dirList := c.Engine.HelperBinariesDir + bindirPath := "" + bindirSearched := false // If set, search this directory first. This is used in testing. if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found { @@ -1234,6 +1312,24 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) } for _, path := range dirList { + if path == bindirPrefix || strings.HasPrefix(path, bindirPrefix+string(filepath.Separator)) { + // Calculate the path to the executable first time we encounter a $BINDIR prefix. 
+ if !bindirSearched { + bindirSearched = true + bindirPath = findBindir() + } + // If there's an error, don't stop the search for the helper binary. + // findBindir() will have warned once during the first failure. + if bindirPath == "" { + continue + } + // Replace the $BINDIR prefix with the path to the directory of the current binary. + if path == bindirPrefix { + path = bindirPath + } else { + path = filepath.Join(bindirPath, strings.TrimPrefix(path, bindirPrefix+string(filepath.Separator))) + } + } fullpath := filepath.Join(path, name) if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { return fullpath, nil diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go index 0ab9e029412..13bd3a3769b 100644 --- a/vendor/github.com/containers/common/pkg/config/config_darwin.go +++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go @@ -10,6 +10,10 @@ const ( // DefaultContainersConfig holds the default containers config path DefaultContainersConfig = "/usr/share/" + _configPath + + // DefaultSignaturePolicyPath is the default value for the + // policy.json file. + DefaultSignaturePolicyPath = "/etc/containers/policy.json" ) // podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations. @@ -35,4 +39,6 @@ var defaultHelperBinariesDir = []string{ "/usr/local/lib/podman", "/usr/libexec/podman", "/usr/lib/podman", + // Relative to the binary directory + "$BINDIR/../libexec/podman", } diff --git a/vendor/github.com/containers/common/pkg/config/config_freebsd.go b/vendor/github.com/containers/common/pkg/config/config_freebsd.go index d6981235667..903f0b47ce6 100644 --- a/vendor/github.com/containers/common/pkg/config/config_freebsd.go +++ b/vendor/github.com/containers/common/pkg/config/config_freebsd.go @@ -10,6 +10,10 @@ const ( // DefaultContainersConfig holds the default containers config path DefaultContainersConfig = "/usr/local/share/" + _configPath + + // DefaultSignaturePolicyPath is the default value for the + // policy.json file. + DefaultSignaturePolicyPath = "/usr/local/etc/containers/policy.json" ) // podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations. diff --git a/vendor/github.com/containers/common/pkg/config/config_linux.go b/vendor/github.com/containers/common/pkg/config/config_linux.go index 4f0889f2962..4ce5d032096 100644 --- a/vendor/github.com/containers/common/pkg/config/config_linux.go +++ b/vendor/github.com/containers/common/pkg/config/config_linux.go @@ -13,6 +13,10 @@ const ( // DefaultContainersConfig holds the default containers config path DefaultContainersConfig = "/usr/share/" + _configPath + + // DefaultSignaturePolicyPath is the default value for the + // policy.json file. 
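The $BINDIR handling above (and the new "$BINDIR/../libexec/podman" entry in the darwin defaults) lets helper-binary directories be declared relative to whichever binary is actually running, which matters for relocatable installs. A standalone sketch of the expansion, assuming bindirPrefix is the literal string "$BINDIR" (its definition sits outside this hunk):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// bindirPrefix is an assumption here; the vendored code defines it
// elsewhere in the package.
const bindirPrefix = "$BINDIR"

// expandBindir resolves a "$BINDIR"-relative directory against the
// directory of the current executable, following symlinks the same
// way findBindir does above.
func expandBindir(dir string) (string, error) {
	if dir != bindirPrefix && !strings.HasPrefix(dir, bindirPrefix+string(filepath.Separator)) {
		return dir, nil // not $BINDIR-relative, use verbatim
	}
	exe, err := os.Executable()
	if err != nil {
		return "", err
	}
	if exe, err = filepath.EvalSymlinks(exe); err != nil {
		return "", err
	}
	if dir == bindirPrefix {
		return filepath.Dir(exe), nil
	}
	return filepath.Join(filepath.Dir(exe), strings.TrimPrefix(dir, bindirPrefix+string(filepath.Separator))), nil
}

func main() {
	// e.g. the new darwin default entry
	fmt.Println(expandBindir("$BINDIR/../libexec/podman"))
}

Resolving symlinks first means a podman symlinked from /usr/local/bin into a bundle still finds helpers next to the real binary, not next to the symlink.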
+ DefaultSignaturePolicyPath = "/etc/containers/policy.json" ) func selinuxEnabled() bool { @@ -23,7 +27,7 @@ func customConfigFile() (string, error) { if path, found := os.LookupEnv("CONTAINERS_CONF"); found { return path, nil } - if unshare.IsRootless() { + if unshare.GetRootlessUID() > 0 { path, err := rootlessConfigPath() if err != nil { return "", err @@ -34,7 +38,7 @@ func customConfigFile() (string, error) { } func ifRootlessConfigPath() (string, error) { - if unshare.IsRootless() { + if unshare.GetRootlessUID() > 0 { path, err := rootlessConfigPath() if err != nil { return "", err diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index bc8ddc65588..e101b062193 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -11,6 +11,7 @@ import ( "strings" "syscall" + "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" units "github.com/docker/go-units" ) @@ -57,6 +58,9 @@ func (c *EngineConfig) validatePaths() error { func (c *ContainersConfig) validateDevices() error { for _, d := range c.Devices { + if cdi.IsQualifiedName(d) { + continue + } _, _, _, err := Device(d) if err != nil { return err diff --git a/vendor/github.com/containers/common/pkg/config/config_windows.go b/vendor/github.com/containers/common/pkg/config/config_windows.go index 6c9d58485c6..b5838072ebb 100644 --- a/vendor/github.com/containers/common/pkg/config/config_windows.go +++ b/vendor/github.com/containers/common/pkg/config/config_windows.go @@ -8,6 +8,10 @@ const ( // DefaultContainersConfig holds the default containers config path DefaultContainersConfig = "/usr/share/" + _configPath + + // DefaultSignaturePolicyPath is the default value for the + // policy.json file. + DefaultSignaturePolicyPath = "/etc/containers/policy.json" ) // podman remote clients on windows cannot use unshare.isRootless() to determine the configuration file locations. diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index d1ac7c0e8c9..83396173589 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -52,19 +52,18 @@ # List of default capabilities for containers. If it is empty or commented out, # the default capabilities defined in the container engine will be added. # -default_capabilities = [ - "CHOWN", - "DAC_OVERRIDE", - "FOWNER", - "FSETID", - "KILL", - "NET_BIND_SERVICE", - "SETFCAP", - "SETGID", - "SETPCAP", - "SETUID", - "SYS_CHROOT" -] +#default_capabilities = [ +# "CHOWN", +# "DAC_OVERRIDE", +# "FOWNER", +# "FSETID", +# "KILL", +# "NET_BIND_SERVICE", +# "SETFCAP", +# "SETGID", +# "SETPCAP", +# "SETUID", +#] # A list of sysctls to be set in containers by default, # specified as "name=value", @@ -216,6 +215,10 @@ default_sysctls = [ # #prepare_volume_on_create = false +# Run all containers with root file system mounted read-only +# +# read_only = false + # Path to the seccomp.json profile which is used as the default seccomp profile # for the runtime. # @@ -244,12 +247,6 @@ default_sysctls = [ # #userns = "host" -# Number of UIDs to allocate for the automatic container creation. 
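The config_local.go change above short-circuits device validation for CDI (Container Device Interface) names, so entries like "vendor.com/class=name" pass through untouched while classic "/dev/src[:dst[:permissions]]" mappings are still parsed by Device(). A rough sketch of that split, with a simplified stand-in for cdi.IsQualifiedName (the real check lives in the container-device-interface module):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// cdiName is a simplified stand-in for cdi.IsQualifiedName:
// "<vendor-domain>/<class>=<device>", e.g. "nvidia.com/gpu=0".
var cdiName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9.-]*/[A-Za-z0-9_-]+=.+$`)

func validateDevice(device string) error {
	if cdiName.MatchString(device) {
		return nil // CDI devices are resolved later by the runtime
	}
	// classic mapping: src[:dst[:permissions]]
	if !strings.HasPrefix(strings.Split(device, ":")[0], "/dev/") {
		return fmt.Errorf("invalid device mapping %q", device)
	}
	return nil
}

func main() {
	fmt.Println(validateDevice("nvidia.com/gpu=0"))       // nil: CDI name
	fmt.Println(validateDevice("/dev/sdc:/dev/xvdc:rwm")) // nil: classic mapping
	fmt.Println(validateDevice("sdc"))                    // error
}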
-# UIDs are allocated from the "container" UIDs listed in -# /etc/subuid & /etc/subgid -# -#userns_size = 65536 - # Default way to to create a UTS namespace for the container # Options are: # `private` Create private UTS Namespace for the container. @@ -263,6 +260,11 @@ default_sysctls = [ # If it is empty or commented out, no volumes will be added # #volumes = [] +# +#[engine.platform_to_oci_runtime] +#"wasi/wasm" = ["crun-wasm"] +#"wasi/wasm32" = ["crun-wasm"] +#"wasi/wasm64" = ["crun-wasm"] [secrets] #driver = "file" @@ -411,6 +413,10 @@ default_sysctls = [ # #events_logger = "journald" +# Creates a more verbose container-create event which includes a JSON payload +# with detailed information about the container. +#events_container_create_inspect_data = false + # A is a list of directories which are used to search for helper binaries. # #helper_binaries_dir = [ @@ -543,7 +549,7 @@ default_sysctls = [ # List of the OCI runtimes that support --format=json. When json is supported # engine will use it for reporting nicer errors. # -#runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"] +#runtime_supports_json = ["crun", "runc", "kata", "runsc", "youki", "krun"] # List of the OCI runtimes that supports running containers with KVM Separation. # @@ -581,7 +587,7 @@ default_sysctls = [ # map of service destinations # -#[service_destinations] +# [service_destinations] # [service_destinations.production] # URI to access the Podman service # Examples: @@ -605,6 +611,12 @@ default_sysctls = [ # #volume_path = "/var/lib/containers/storage/volumes" +# Default timeout (in seconds) for volume plugin operations. +# Plugins are external programs accessed via a REST API; this sets a timeout +# for requests to that API. +# A value of 0 is treated as no timeout. +#volume_plugin_timeout = 5 + # Paths to look for a valid OCI runtime (crun, runc, kata, runsc, krun, etc) [engine.runtimes] #crun = [ @@ -648,6 +660,13 @@ default_sysctls = [ # "/run/current-system/sw/bin/runsc", #] +#youki = [ +# "/usr/local/bin/youki", +# "/usr/bin/youki", +# "/bin/youki", +# "/run/current-system/sw/bin/youki", +#] + #krun = [ # "/usr/bin/krun", # "/usr/local/bin/krun", @@ -665,9 +684,16 @@ default_sysctls = [ # #disk_size=10 -# The image used when creating a podman-machine VM. +# Default image URI when creating a new VM using `podman machine init`. +# Options: On Linux/Mac, `testing`, `stable`, `next`. On Windows, the major +# version of the OS (e.g `36`) for Fedora 36. For all platforms you can +# alternatively specify a custom download URL to an image. Container engines +# translate URIs $OS and $ARCH to the native OS and ARCH. URI +# "https://example.com/$OS/$ARCH/foobar.ami" becomes +# "https://example.com/linux/amd64/foobar.ami" on a Linux AMD machine. +# The default value is `testing`. # -#image = "testing" +# image = "testing" # Memory in MB a machine is created with. # diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd index 50480fe73f9..0ac0f325e7d 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -212,12 +212,6 @@ default_sysctls = [ # #userns = "host" -# Number of UIDs to allocate for the automatic container creation. 
-# UIDs are allocated from the "container" UIDs listed in -# /etc/subuid & /etc/subgid -# -#userns_size = 65536 - # Default way to to create a UTS namespace for the container # Options are: # `private` Create private UTS Namespace for the container. @@ -485,7 +479,7 @@ default_sysctls = [ # List of the OCI runtimes that support --format=json. When json is supported # engine will use it for reporting nicer errors. # -#runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"] +#runtime_supports_json = ["crun", "runc", "kata", "runsc", "youki", "krun"] # List of the OCI runtimes that supports running containers with KVM Separation. # @@ -590,6 +584,13 @@ default_sysctls = [ # "/run/current-system/sw/bin/runsc", #] +#youki = [ +# "/usr/local/bin/youki", +# "/usr/bin/youki", +# "/bin/youki", +# "/run/current-system/sw/bin/youki", +#] + #krun = [ # "/usr/bin/krun", # "/usr/local/bin/krun", diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 161a9c8d6d3..e27f630647e 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -1,15 +1,11 @@ package config import ( - "bytes" "errors" "fmt" "net" "os" - "os/exec" "path/filepath" - "regexp" - "strconv" "strings" nettypes "github.com/containers/common/libnetwork/types" @@ -24,19 +20,6 @@ import ( ) const ( - // _conmonMinMajorVersion is the major version required for conmon. - _conmonMinMajorVersion = 2 - - // _conmonMinMinorVersion is the minor version required for conmon. - _conmonMinMinorVersion = 0 - - // _conmonMinPatchVersion is the sub-minor version required for conmon. - _conmonMinPatchVersion = 1 - - // _conmonVersionFormatErr is used when the expected versio-format of conmon - // has changed. - _conmonVersionFormatErr = "conmon version changed format: %w" - // _defaultGraphRoot points to the default path of the graph root. _defaultGraphRoot = "/var/lib/containers/storage" @@ -67,20 +50,16 @@ var ( DefaultHooksDirs = []string{"/usr/share/containers/oci/hooks.d"} // DefaultCapabilities is the default for the default_capabilities option in the containers.conf file. DefaultCapabilities = []string{ - "CAP_AUDIT_WRITE", "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FOWNER", "CAP_FSETID", "CAP_KILL", - "CAP_MKNOD", "CAP_NET_BIND_SERVICE", - "CAP_NET_RAW", "CAP_SETFCAP", "CAP_SETGID", "CAP_SETPCAP", "CAP_SETUID", - "CAP_SYS_CHROOT", } // Search these locations in which CNIPlugins can be installed. @@ -140,9 +119,6 @@ const ( DefaultPidsLimit = 2048 // DefaultPullPolicy pulls the image if it does not exist locally. DefaultPullPolicy = "missing" - // DefaultSignaturePolicyPath is the default value for the - // policy.json file. - DefaultSignaturePolicyPath = "/etc/containers/policy.json" // DefaultSubnet is the subnet that will be used for the default // network. DefaultSubnet = "10.88.0.0/16" @@ -152,6 +128,7 @@ const ( // DefaultShmSize is the default upper limit on the size of tmpfs mounts. DefaultShmSize = "65536k" // DefaultUserNSSize indicates the default number of UIDs allocated for user namespace within a container. + // Deprecated: no user of this field is known. DefaultUserNSSize = 65536 // OCIBufSize limits maximum LogSizeMax. OCIBufSize = 8192 @@ -159,6 +136,8 @@ const ( SeccompOverridePath = _etcDir + "/containers/seccomp.json" // SeccompDefaultPath defines the default seccomp path. 
SeccompDefaultPath = _installPrefix + "/share/containers/seccomp.json" + // DefaultVolumePluginTimeout is the default volume plugin timeout, in seconds + DefaultVolumePluginTimeout = 5 ) // DefaultConfig defines the default values from containers.conf. @@ -169,7 +148,7 @@ func DefaultConfig() (*Config, error) { } defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath - if unshare.IsRootless() { + if useUserConfigLocations() { configHome, err := homedir.GetConfigHome() if err != nil { return nil, err @@ -224,7 +203,7 @@ func DefaultConfig() (*Config, error) { TZ: "", Umask: "0022", UTSNS: "private", - UserNSSize: DefaultUserNSSize, + UserNSSize: DefaultUserNSSize, // Deprecated }, Network: NetworkConfig{ DefaultNetwork: "podman", @@ -255,7 +234,7 @@ func defaultMachineConfig() MachineConfig { Image: getDefaultMachineImage(), Memory: 2048, User: getDefaultMachineUser(), - Volumes: []string{"$HOME:$HOME"}, + Volumes: getDefaultMachineVolumes(), } } @@ -269,16 +248,16 @@ func defaultConfigFromMemory() (*EngineConfig, error) { } c.TmpDir = tmp - c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log") - c.EventsLogFileMaxSize = eventsLogMaxSize(DefaultEventsLogSizeMax) c.CompatAPIEnforceDockerHub = true if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { - types.SetDefaultConfigFilePath(path) + if err := types.SetDefaultConfigFilePath(path); err != nil { + return nil, err + } } - storeOpts, err := types.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID()) + storeOpts, err := types.DefaultStoreOptions(useUserConfigLocations(), unshare.GetRootlessUID()) if err != nil { return nil, err } @@ -287,11 +266,14 @@ func defaultConfigFromMemory() (*EngineConfig, error) { logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot) storeOpts.GraphRoot = _defaultGraphRoot } + c.graphRoot = storeOpts.GraphRoot c.ImageCopyTmpDir = getDefaultTmpDir() c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod") c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") + c.VolumePluginTimeout = DefaultVolumePluginTimeout + c.HelperBinariesDir = defaultHelperBinariesDir if additionalHelperBinariesDir != "" { c.HelperBinariesDir = append(c.HelperBinariesDir, additionalHelperBinariesDir) @@ -318,6 +300,15 @@ func defaultConfigFromMemory() (*EngineConfig, error) { "/bin/crun", "/run/current-system/sw/bin/crun", }, + "crun-wasm": { + "/usr/bin/crun-wasm", + "/usr/sbin/crun-wasm", + "/usr/local/bin/crun-wasm", + "/usr/local/sbin/crun-wasm", + "/sbin/crun-wasm", + "/bin/crun-wasm", + "/run/current-system/sw/bin/crun-wasm", + }, "runc": { "/usr/bin/runc", "/usr/sbin/runc", @@ -350,10 +341,24 @@ func defaultConfigFromMemory() (*EngineConfig, error) { "/sbin/runsc", "/run/current-system/sw/bin/runsc", }, + "youki": { + "/usr/local/bin/youki", + "/usr/bin/youki", + "/bin/youki", + "/run/current-system/sw/bin/youki", + }, "krun": { "/usr/bin/krun", "/usr/local/bin/krun", }, + "ocijail": { + "/usr/local/bin/ocijail", + }, + } + c.PlatformToOCIRuntime = map[string]string{ + "wasi/wasm": "crun-wasm", + "wasi/wasm32": "crun-wasm", + "wasi/wasm64": "crun-wasm", } // Needs to be called after populating c.OCIRuntimes. 
c.OCIRuntime = c.findRuntime() @@ -371,12 +376,23 @@ func defaultConfigFromMemory() (*EngineConfig, error) { "/usr/local/sbin/conmon", "/run/current-system/sw/bin/conmon", } + c.ConmonRsPath = []string{ + "/usr/libexec/podman/conmonrs", + "/usr/local/libexec/podman/conmonrs", + "/usr/local/lib/podman/conmonrs", + "/usr/bin/conmonrs", + "/usr/sbin/conmonrs", + "/usr/local/bin/conmonrs", + "/usr/local/sbin/conmonrs", + "/run/current-system/sw/bin/conmonrs", + } c.PullPolicy = DefaultPullPolicy c.RuntimeSupportsJSON = []string{ "crun", "runc", "kata", "runsc", + "youki", "krun", } c.RuntimeSupportsNoCgroups = []string{"crun", "krun"} @@ -397,12 +413,13 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.ChownCopiedFiles = true c.PodExitPolicy = defaultPodExitPolicy + c.SSHConfig = getDefaultSSHConfig() return c, nil } func defaultTmpDir() (string, error) { - if !unshare.IsRootless() { + if !useUserConfigLocations() { return getLibpodTmpDir(), nil } @@ -423,57 +440,6 @@ func defaultTmpDir() (string, error) { return filepath.Join(libpodRuntimeDir, "tmp"), nil } -// probeConmon calls conmon --version and verifies it is a new enough version for -// the runtime expectations the container engine currently has. -func probeConmon(conmonBinary string) error { - cmd := exec.Command(conmonBinary, "--version") - var out bytes.Buffer - cmd.Stdout = &out - if err := cmd.Run(); err != nil { - return err - } - r := regexp.MustCompile(`^conmon version (?P\d+).(?P\d+).(?P\d+)`) - - matches := r.FindStringSubmatch(out.String()) - if len(matches) != 4 { - return errors.New(_conmonVersionFormatErr) - } - major, err := strconv.Atoi(matches[1]) - if err != nil { - return fmt.Errorf(_conmonVersionFormatErr, err) - } - if major < _conmonMinMajorVersion { - return ErrConmonOutdated - } - if major > _conmonMinMajorVersion { - return nil - } - - minor, err := strconv.Atoi(matches[2]) - if err != nil { - return fmt.Errorf(_conmonVersionFormatErr, err) - } - if minor < _conmonMinMinorVersion { - return ErrConmonOutdated - } - if minor > _conmonMinMinorVersion { - return nil - } - - patch, err := strconv.Atoi(matches[3]) - if err != nil { - return fmt.Errorf(_conmonVersionFormatErr, err) - } - if patch < _conmonMinPatchVersion { - return ErrConmonOutdated - } - if patch > _conmonMinPatchVersion { - return nil - } - - return nil -} - // NetNS returns the default network namespace. func (c *Config) NetNS() string { return c.Containers.NetNS @@ -633,3 +599,17 @@ func machineVolumes(volumes []string) ([]string, error) { } return translatedVolumes, nil } + +func getDefaultSSHConfig() string { + if path, ok := os.LookupEnv("CONTAINERS_SSH_CONF"); ok { + return path + } + dirname := homedir.Get() + return filepath.Join(dirname, ".ssh", "config") +} + +func useUserConfigLocations() bool { + // NOTE: For now we want Windows to use system locations. 
+ // GetRootlessUID == -1 on Windows, so exclude negative range + return unshare.GetRootlessUID() > 0 +} diff --git a/vendor/github.com/containers/common/pkg/config/default_darwin.go b/vendor/github.com/containers/common/pkg/config/default_darwin.go index c502ea55e26..75576662096 100644 --- a/vendor/github.com/containers/common/pkg/config/default_darwin.go +++ b/vendor/github.com/containers/common/pkg/config/default_darwin.go @@ -11,3 +11,12 @@ func getDefaultLockType() string { func getLibpodTmpDir() string { return "/run/libpod" } + +// getDefaultMachineVolumes returns default mounted volumes (possibly with env vars, which will be expanded) +func getDefaultMachineVolumes() []string { + return []string{ + "/Users:/Users", + "/private:/private", + "/var/folders:/var/folders", + } +} diff --git a/vendor/github.com/containers/common/pkg/config/default_freebsd.go b/vendor/github.com/containers/common/pkg/config/default_freebsd.go index 8b10ac1f7b2..f3c999bed2d 100644 --- a/vendor/github.com/containers/common/pkg/config/default_freebsd.go +++ b/vendor/github.com/containers/common/pkg/config/default_freebsd.go @@ -1,13 +1,13 @@ package config func getDefaultCgroupsMode() string { - return "disabled" + return "enabled" } // In theory, FreeBSD should be able to use shm locks but in practice, // this causes cryptic error messages from the kernel that look like: // -// comm podman pid 90813: handling rb error 22 +// comm podman pid 90813: handling rb error 22 // // These seem to be related to fork/exec code paths. Fall back to // file-based locks. @@ -18,3 +18,8 @@ func getDefaultLockType() string { func getLibpodTmpDir() string { return "/var/run/libpod" } + +// getDefaultMachineVolumes returns default mounted volumes (possibly with env vars, which will be expanded) +func getDefaultMachineVolumes() []string { + return []string{"$HOME:$HOME"} +} diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go index 86873beb1f9..d4d04764a14 100644 --- a/vendor/github.com/containers/common/pkg/config/default_linux.go +++ b/vendor/github.com/containers/common/pkg/config/default_linux.go @@ -2,7 +2,6 @@ package config import ( "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -37,7 +36,7 @@ func getDefaultProcessLimits() []string { rlim := unix.Rlimit{Cur: oldMaxSize, Max: oldMaxSize} oldrlim := rlim // Attempt to set file limit and process limit to pid_max in OS - dat, err := ioutil.ReadFile("/proc/sys/kernel/pid_max") + dat, err := os.ReadFile("/proc/sys/kernel/pid_max") if err == nil { val := strings.TrimSuffix(string(dat), "\n") max, err := strconv.ParseUint(val, 10, 64) @@ -70,3 +69,8 @@ func getDefaultLockType() string { func getLibpodTmpDir() string { return "/run/libpod" } + +// getDefaultMachineVolumes returns default mounted volumes (possibly with env vars, which will be expanded) +func getDefaultMachineVolumes() []string { + return []string{"$HOME:$HOME"} +} diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go index 1ff88fc4252..08a0bf22395 100644 --- a/vendor/github.com/containers/common/pkg/config/default_windows.go +++ b/vendor/github.com/containers/common/pkg/config/default_windows.go @@ -44,3 +44,8 @@ func getDefaultLockType() string { func getLibpodTmpDir() string { return "/run/libpod" } + +// getDefaultMachineVolumes returns default mounted volumes (possibly with env vars, which will be expanded) +func 
getDefaultMachineVolumes() []string { + return []string{} +} diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go index 03d19a12f30..3cd6ff84515 100644 --- a/vendor/github.com/containers/common/pkg/config/systemd.go +++ b/vendor/github.com/containers/common/pkg/config/systemd.go @@ -4,7 +4,7 @@ package config import ( - "io/ioutil" + "os" "path/filepath" "strings" "sync" @@ -53,7 +53,7 @@ func defaultLogDriver() string { func useSystemd() bool { systemdOnce.Do(func() { - dat, err := ioutil.ReadFile("/proc/1/comm") + dat, err := os.ReadFile("/proc/1/comm") if err == nil { val := strings.TrimSuffix(string(dat), "\n") usesSystemd = (val == "systemd") @@ -68,13 +68,13 @@ func useJournald() bool { return } for _, root := range []string{"/run/log/journal", "/var/log/journal"} { - dirs, err := ioutil.ReadDir(root) + dirs, err := os.ReadDir(root) if err != nil { continue } for _, d := range dirs { if d.IsDir() { - if _, err := ioutil.ReadDir(filepath.Join(root, d.Name())); err == nil { + if _, err := os.ReadDir(filepath.Join(root, d.Name())); err == nil { usesJournald = true return } diff --git a/vendor/github.com/containers/common/pkg/download/download.go b/vendor/github.com/containers/common/pkg/download/download.go index abf4c87739e..777a1152202 100644 --- a/vendor/github.com/containers/common/pkg/download/download.go +++ b/vendor/github.com/containers/common/pkg/download/download.go @@ -3,14 +3,14 @@ package download import ( "fmt" "io" - "io/ioutil" "net/http" + "os" ) // FromURL downloads the specified source to a file in tmpdir (OS defaults if // empty). func FromURL(tmpdir, source string) (string, error) { - tmp, err := ioutil.TempFile(tmpdir, "") + tmp, err := os.CreateTemp(tmpdir, "") if err != nil { return "", fmt.Errorf("creating temporary download file: %w", err) } diff --git a/vendor/github.com/containers/common/pkg/filters/filters.go b/vendor/github.com/containers/common/pkg/filters/filters.go index f8b0066e5c6..729061dbcad 100644 --- a/vendor/github.com/containers/common/pkg/filters/filters.go +++ b/vendor/github.com/containers/common/pkg/filters/filters.go @@ -38,6 +38,7 @@ func ComputeUntilTimestamp(filterValues []string) (time.Time, error) { // background. 
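The systemd.go and download.go hunks above are part of the io/ioutil deprecation cleanup: ioutil.ReadFile becomes os.ReadFile (identical signature), ioutil.TempFile becomes os.CreateTemp, and ioutil.ReadDir becomes os.ReadDir, which returns the cheaper []fs.DirEntry instead of []fs.FileInfo; the call sites above only use IsDir() and Name(), which both types provide, so the swap is behavior-preserving. A minimal sketch of the PID-1 check the migrated code performs:

package main

import (
	"fmt"
	"os"
	"strings"
)

// systemdIsInit reports whether systemd is PID 1, as useSystemd
// above does: read /proc/1/comm and compare the trimmed contents.
func systemdIsInit() bool {
	dat, err := os.ReadFile("/proc/1/comm")
	if err != nil {
		return false
	}
	return strings.TrimSuffix(string(dat), "\n") == "systemd"
}

func main() {
	fmt.Println("systemd as init:", systemdIsInit())
}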
// // revive does not like the name because the package is already called filters +// //nolint:revive func FiltersFromRequest(r *http.Request) ([]string, error) { var ( diff --git a/vendor/github.com/containers/common/pkg/machine/machine.go b/vendor/github.com/containers/common/pkg/machine/machine.go index 37e89a08ec3..36428e58f40 100644 --- a/vendor/github.com/containers/common/pkg/machine/machine.go +++ b/vendor/github.com/containers/common/pkg/machine/machine.go @@ -10,6 +10,7 @@ import ( ) // TODO: change name to MachineMarker since package is already called machine +// //nolint:revive type MachineMarker struct { Enabled bool @@ -57,6 +58,7 @@ func IsPodmanMachine() bool { } // TODO: change name to HostType since package is already called machine +// //nolint:revive func MachineHostType() string { return GetMachineMarker().Type diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index d2279ab0ea1..d351bdf17f2 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -452,23 +452,23 @@ func (l *list) Serialize(mimeType string) ([]byte, error) { if l.preferOCI() { res, err = json.Marshal(&l.oci) if err != nil { - return nil, fmt.Errorf("error marshalling OCI image index: %w", err) + return nil, fmt.Errorf("marshalling OCI image index: %w", err) } } else { res, err = json.Marshal(&l.docker) if err != nil { - return nil, fmt.Errorf("error marshalling Docker manifest list: %w", err) + return nil, fmt.Errorf("marshalling Docker manifest list: %w", err) } } case v1.MediaTypeImageIndex: res, err = json.Marshal(&l.oci) if err != nil { - return nil, fmt.Errorf("error marshalling OCI image index: %w", err) + return nil, fmt.Errorf("marshalling OCI image index: %w", err) } case manifest.DockerV2ListMediaType: res, err = json.Marshal(&l.docker) if err != nil { - return nil, fmt.Errorf("error marshalling Docker manifest list: %w", err) + return nil, fmt.Errorf("marshalling Docker manifest list: %w", err) } default: return nil, fmt.Errorf("serializing list to type %q not implemented: %w", mimeType, ErrManifestTypeNotSupported) diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go index 15e932129b2..7629f584215 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse.go +++ b/vendor/github.com/containers/common/pkg/parse/parse.go @@ -103,10 +103,11 @@ func ValidateVolumeOpts(options []string) ([]string, error) { // Device parses device mapping string to a src, dest & permissions string // Valid values for device looklike: -// '/dev/sdc" -// '/dev/sdc:/dev/xvdc" -// '/dev/sdc:/dev/xvdc:rwm" -// '/dev/sdc:rm" +// +// '/dev/sdc" +// '/dev/sdc:/dev/xvdc" +// '/dev/sdc:/dev/xvdc:rwm" +// '/dev/sdc:rm" func Device(device string) (src, dest, permissions string, err error) { permissions = "rwm" arr := strings.Split(device, ":") diff --git a/vendor/github.com/containers/common/pkg/parse/parse_unix.go b/vendor/github.com/containers/common/pkg/parse/parse_unix.go index 8b3599229a3..44fe33d9ef4 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/common/pkg/parse/parse_unix.go @@ -39,7 +39,7 @@ func DeviceFromPath(device string) ([]devices.Device, error) { // If source device is a directory srcDevices, err := devices.GetDevices(src) if err != nil { - return nil, fmt.Errorf("error getting source devices 
from directory %s: %w", src, err) + return nil, fmt.Errorf("getting source devices from directory %s: %w", src, err) } devs := make([]devices.Device, 0, len(srcDevices)) for _, d := range srcDevices { diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go new file mode 100644 index 00000000000..b9c464478d2 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go @@ -0,0 +1,403 @@ +package ssh + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/url" + "os" + "os/user" + "path" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/containers/common/pkg/config" + "github.com/containers/storage/pkg/homedir" + "github.com/pkg/sftp" + "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + "golang.org/x/crypto/ssh/knownhosts" +) + +func golangConnectionCreate(options ConnectionCreateOptions) error { + var match bool + var err error + if match, err = regexp.Match("^[A-Za-z][A-Za-z0-9+.-]*://", []byte(options.Path)); err != nil { + return fmt.Errorf("invalid destination: %w", err) + } + + if !match { + options.Path = "ssh://" + options.Path + } + + if len(options.Socket) > 0 { + options.Path += options.Socket + } + + dst, uri, err := Validate(options.User, options.Path, options.Port, options.Identity) + if err != nil { + return err + } + + if uri.Path == "" || uri.Path == "/" { + if uri.Path, err = getUDS(uri, options.Identity); err != nil { + return err + } + dst.URI += uri.Path + } + + cfg, err := config.ReadCustomConfig() + if err != nil { + return err + } + if cfg.Engine.ServiceDestinations == nil { + cfg.Engine.ServiceDestinations = map[string]config.Destination{ + options.Name: *dst, + } + cfg.Engine.ActiveService = options.Name + } else { + cfg.Engine.ServiceDestinations[options.Name] = *dst + } + return cfg.Write() +} + +func golangConnectionDial(options ConnectionDialOptions) (*ConnectionDialReport, error) { + _, uri, err := Validate(options.User, options.Host, options.Port, options.Identity) + if err != nil { + return nil, err + } + cfg, err := ValidateAndConfigure(uri, options.Identity, options.InsecureIsMachineConnection) + if err != nil { + return nil, err + } + + dial, err := ssh.Dial("tcp", uri.Host, cfg) // dial the client + if err != nil { + return nil, fmt.Errorf("failed to connect: %w", err) + } + + return &ConnectionDialReport{dial}, nil +} + +func golangConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, error) { + if !strings.HasPrefix(options.Host, "ssh://") { + options.Host = "ssh://" + options.Host + } + _, uri, err := Validate(options.User, options.Host, options.Port, options.Identity) + if err != nil { + return nil, err + } + + cfg, err := ValidateAndConfigure(uri, options.Identity, false) + if err != nil { + return nil, err + } + dialAdd, err := ssh.Dial("tcp", uri.Host, cfg) // dial the client + if err != nil { + return nil, fmt.Errorf("failed to connect: %w", err) + } + + out, err := ExecRemoteCommand(dialAdd, strings.Join(options.Args, " ")) + if err != nil { + return nil, err + } + return &ConnectionExecReport{Response: string(out)}, nil +} + +func golangConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, error) { + host, remoteFile, localFile, swap, err := ParseScpArgs(options) + if err != nil { + return nil, err + } + + // removed for parsing + if !strings.HasPrefix(host, "ssh://") { + host = "ssh://" + host + } + _, uri, err := 
Validate(options.User, host, options.Port, options.Identity)
+	if err != nil {
+		return nil, err
+	}
+	cfg, err := ValidateAndConfigure(uri, options.Identity, false)
+	if err != nil {
+		return nil, err
+	}
+
+	dial, err := ssh.Dial("tcp", uri.Host, cfg) // dial the client
+	if err != nil {
+		return nil, fmt.Errorf("failed to connect: %w", err)
+	}
+	sc, err := sftp.NewClient(dial)
+	if err != nil {
+		return nil, err
+	}
+
+	f, err := os.OpenFile(localFile, (os.O_RDWR | os.O_CREATE), 0o644)
+	if err != nil {
+		return nil, err
+	}
+
+	parent := filepath.Dir(remoteFile)
+	path := string(filepath.Separator)
+	dirs := strings.Split(parent, path)
+	for _, dir := range dirs {
+		path = filepath.Join(path, dir)
+		// ignore errors due to most of the dirs already existing
+		_ = sc.Mkdir(path)
+	}
+
+	remote, err := sc.OpenFile(remoteFile, (os.O_RDWR | os.O_CREATE))
+	if err != nil {
+		return nil, err
+	}
+	defer remote.Close()
+
+	if !swap {
+		_, err = io.Copy(remote, f)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		_, err = io.Copy(f, remote)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &ConnectionScpReport{Response: remote.Name()}, nil
+}
+
+// ExecRemoteCommand takes an SSH client connection and a command to run and executes the
+// command on the specified client. It returns the client's stdout; on failure, the
+// client's stderr is folded into the returned error.
+func ExecRemoteCommand(dial *ssh.Client, run string) ([]byte, error) {
+	sess, err := dial.NewSession() // new ssh client session
+	if err != nil {
+		return nil, err
+	}
+	defer sess.Close()
+
+	var buffer bytes.Buffer
+	var bufferErr bytes.Buffer
+	sess.Stdout = &buffer    // output from client funneled into buffer
+	sess.Stderr = &bufferErr // stderr from client funneled into buffer
+	if err := sess.Run(run); err != nil { // run the command on the ssh client
+		return nil, fmt.Errorf("%v: %w", bufferErr.String(), err)
+	}
+	return buffer.Bytes(), nil
+}
+
+func GetUserInfo(uri *url.URL) (*url.Userinfo, error) {
+	var (
+		usr *user.User
+		err error
+	)
+	if u, found := os.LookupEnv("_CONTAINERS_ROOTLESS_UID"); found {
+		usr, err = user.LookupId(u)
+		if err != nil {
+			return nil, fmt.Errorf("failed to lookup rootless user: %w", err)
+		}
+	} else {
+		usr, err = user.Current()
+		if err != nil {
+			return nil, fmt.Errorf("failed to obtain current user: %w", err)
+		}
+	}
+
+	pw, set := uri.User.Password()
+	if set {
+		return url.UserPassword(usr.Username, pw), nil
+	}
+	return url.User(usr.Username), nil
+}
+
+// ValidateAndConfigure takes an ssh URL and an identity key (RSA and the like) and ensures
+// the information given is valid. iden can be blank to mean no identity key.
+// Once the information is validated, it creates and returns an ssh.ClientConfig.
+func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection bool) (*ssh.ClientConfig, error) {
+	var signers []ssh.Signer
+	passwd, passwdSet := uri.User.Password()
+	if iden != "" { // iden might be blank if coming from image scp or if no validation is needed
+		value := iden
+		s, err := PublicKey(value, []byte(passwd))
+		if err != nil {
+			return nil, fmt.Errorf("failed to read identity %q: %w", value, err)
+		}
+		signers = append(signers, s)
+		logrus.Debugf("SSH Ident Key %q %s %s", value, ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type())
+	} else if sock, found := os.LookupEnv("SSH_AUTH_SOCK"); found { // validate ssh information, specifically the unix file socket used by the ssh agent.
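ExecRemoteCommand above is the building block the new ssh package uses for everything from the `podman info` probe to connection exec. A hedged usage sketch (host, user and auth are placeholders, and the import alias avoids the clash with golang.org/x/crypto/ssh; a real caller would obtain the ClientConfig via ValidateAndConfigure rather than hand-rolling it):

package main

import (
	"fmt"
	"log"

	commonssh "github.com/containers/common/pkg/ssh"
	gossh "golang.org/x/crypto/ssh"
)

func main() {
	cfg := &gossh.ClientConfig{
		User: "core",
		// Auth left empty for brevity; fill in gossh.PublicKeys(...) or
		// gossh.Password(...) as ValidateAndConfigure does above.
		HostKeyCallback: gossh.InsecureIgnoreHostKey(), // sketch only; the real code verifies known_hosts
	}
	client, err := gossh.Dial("tcp", "example.com:22", cfg)
	if err != nil {
		log.Fatalf("failed to connect: %v", err)
	}
	defer client.Close()

	out, err := commonssh.ExecRemoteCommand(client, "podman info --format=json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}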
+ logrus.Debugf("Found SSH_AUTH_SOCK %q, ssh-agent signer enabled", sock) + + c, err := net.Dial("unix", sock) + if err != nil { + return nil, err + } + agentSigners, err := agent.NewClient(c).Signers() + if err != nil { + return nil, err + } + + signers = append(signers, agentSigners...) + + if logrus.IsLevelEnabled(logrus.DebugLevel) { + for _, s := range agentSigners { + logrus.Debugf("SSH Agent Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) + } + } + } + var authMethods []ssh.AuthMethod // now we validate and check for the authorization methods, most notaibly public key authorization + if len(signers) > 0 { + dedup := make(map[string]ssh.Signer) + for _, s := range signers { + fp := ssh.FingerprintSHA256(s.PublicKey()) + if _, found := dedup[fp]; found { + logrus.Debugf("Dedup SSH Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) + } + dedup[fp] = s + } + + var uniq []ssh.Signer + for _, s := range dedup { + uniq = append(uniq, s) + } + authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { + return uniq, nil + })) + } + if passwdSet { // if password authentication is given and valid, add to the list + authMethods = append(authMethods, ssh.Password(passwd)) + } + if len(authMethods) == 0 { + authMethods = append(authMethods, ssh.PasswordCallback(func() (string, error) { + pass, err := ReadPassword(fmt.Sprintf("%s's login password:", uri.User.Username())) + return string(pass), err + })) + } + tick, err := time.ParseDuration("40s") + if err != nil { + return nil, err + } + + var callback ssh.HostKeyCallback + if insecureIsMachineConnection { + callback = ssh.InsecureIgnoreHostKey() + } else { + callback = ssh.HostKeyCallback(func(host string, remote net.Addr, pubKey ssh.PublicKey) error { + keyFilePath := filepath.Join(homedir.Get(), ".ssh", "known_hosts") + known, err := knownhosts.New(keyFilePath) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return err + } + keyDir := path.Dir(keyFilePath) + if _, err := os.Stat(keyDir); errors.Is(err, os.ErrNotExist) { + if err := os.Mkdir(keyDir, 0o700); err != nil { + return err + } + } + k, err := os.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE, 0o600) + if err != nil { + return err + } + k.Close() + known, err = knownhosts.New(keyFilePath) + if err != nil { + return err + } + } + // we need to check if there is an error from reading known hosts for this public key and if there is an error, what is it, and why is it happening? + // if it is a key mismatch we want to error since we know the host using another key + // however, if it is a general error not because of a known key, we want to add our key to the known_hosts file + hErr := known(host, remote, pubKey) + var keyErr *knownhosts.KeyError + // if keyErr.Want is not empty, we are receiving a different key meaning the host is known but we are using the wrong key + as := errors.As(hErr, &keyErr) + switch { + case as && len(keyErr.Want) > 0: + logrus.Warnf("ssh host key mismatch for host %s, got key %s of type %s", host, ssh.FingerprintSHA256(pubKey), pubKey.Type()) + return keyErr + // if keyErr.Want is empty that just means we do not know this host yet, add it. 
+ case as && len(keyErr.Want) == 0: + // write to known_hosts + err := addKnownHostsEntry(host, pubKey) + if err != nil { + if os.IsNotExist(err) { + logrus.Warn("podman will soon require a known_hosts file to function properly.") + return nil + } + return err + } + case hErr != nil: + return hErr + } + return nil + }) + } + + cfg := &ssh.ClientConfig{ + User: uri.User.Username(), + Auth: authMethods, + HostKeyCallback: callback, + Timeout: tick, + } + return cfg, nil +} + +func getUDS(uri *url.URL, iden string) (string, error) { + cfg, err := ValidateAndConfigure(uri, iden, false) + if err != nil { + return "", fmt.Errorf("failed to validate: %w", err) + } + dial, err := ssh.Dial("tcp", uri.Host, cfg) + if err != nil { + return "", fmt.Errorf("failed to connect: %w", err) + } + defer dial.Close() + + session, err := dial.NewSession() + if err != nil { + return "", fmt.Errorf("failed to create new ssh session on %q: %w", uri.Host, err) + } + defer session.Close() + + // Override podman binary for testing etc + podman := "podman" + if v, found := os.LookupEnv("PODMAN_BINARY"); found { + podman = v + } + infoJSON, err := ExecRemoteCommand(dial, podman+" info --format=json") + if err != nil { + return "", err + } + + var info Info + if err := json.Unmarshal(infoJSON, &info); err != nil { + return "", fmt.Errorf("failed to parse 'podman info' results: %w", err) + } + + if info.Host.RemoteSocket == nil || len(info.Host.RemoteSocket.Path) == 0 { + return "", fmt.Errorf("remote podman %q failed to report its UDS socket", uri.Host) + } + return info.Host.RemoteSocket.Path, nil +} + +// addKnownHostsEntry adds (host, pubKey) to user’s known_hosts. +func addKnownHostsEntry(host string, pubKey ssh.PublicKey) error { + hd := homedir.Get() + known := filepath.Join(hd, ".ssh", "known_hosts") + f, err := os.OpenFile(known, os.O_APPEND|os.O_WRONLY, 0o600) + if err != nil { + return err + } + defer f.Close() + l := knownhosts.Line([]string{host}, pubKey) + if _, err = f.WriteString("\n" + l + "\n"); err != nil { + return err + } + logrus.Infof("key %s added to %s", ssh.FingerprintSHA256(pubKey), known) + return nil +} diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_native.go b/vendor/github.com/containers/common/pkg/ssh/connection_native.go new file mode 100644 index 00000000000..4c407360a3d --- /dev/null +++ b/vendor/github.com/containers/common/pkg/ssh/connection_native.go @@ -0,0 +1,182 @@ +package ssh + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/containers/common/pkg/config" +) + +func nativeConnectionCreate(options ConnectionCreateOptions) error { + var match bool + var err error + if match, err = regexp.Match("^[A-Za-z][A-Za-z0-9+.-]*://", []byte(options.Path)); err != nil { + return fmt.Errorf("invalid destination: %w", err) + } + + if !match { + options.Path = "ssh://" + options.Path + } + + if len(options.Socket) > 0 { + options.Path += options.Socket + } + + dst, uri, err := Validate(options.User, options.Path, options.Port, options.Identity) + if err != nil { + return err + } + + // test connection + ssh, err := exec.LookPath("ssh") + if err != nil { + return fmt.Errorf("no ssh binary found") + } + + if strings.Contains(uri.Host, "/run") { + uri.Host = strings.Split(uri.Host, "/run")[0] + } + conf, err := config.Default() + if err != nil { + return err + } + + args := []string{uri.User.String() + "@" + uri.Hostname()} + + if len(dst.Identity) > 0 { + args = append(args, "-i", dst.Identity) + } + if 
len(conf.Engine.SSHConfig) > 0 { + args = append(args, "-F", conf.Engine.SSHConfig) + } + + output := &bytes.Buffer{} + args = append(args, "podman", "info", "--format", "json") + info := exec.Command(ssh, args...) + info.Stdout = output + err = info.Run() + if err != nil { + return err + } + + remoteInfo := &Info{} + if err := json.Unmarshal(output.Bytes(), &remoteInfo); err != nil { + return fmt.Errorf("failed to parse 'podman info' results: %w", err) + } + + if remoteInfo.Host.RemoteSocket == nil || len(remoteInfo.Host.RemoteSocket.Path) == 0 { + return fmt.Errorf("remote podman %q failed to report its UDS socket", uri.Host) + } + + cfg, err := config.ReadCustomConfig() + if err != nil { + return err + } + if options.Default { + cfg.Engine.ActiveService = options.Name + } + + if cfg.Engine.ServiceDestinations == nil { + cfg.Engine.ServiceDestinations = map[string]config.Destination{ + options.Name: *dst, + } + cfg.Engine.ActiveService = options.Name + } else { + cfg.Engine.ServiceDestinations[options.Name] = *dst + } + + return cfg.Write() +} + +func nativeConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, error) { + dst, uri, err := Validate(options.User, options.Host, options.Port, options.Identity) + if err != nil { + return nil, err + } + + ssh, err := exec.LookPath("ssh") + if err != nil { + return nil, fmt.Errorf("no ssh binary found") + } + + output := &bytes.Buffer{} + errors := &bytes.Buffer{} + if strings.Contains(uri.Host, "/run") { + uri.Host = strings.Split(uri.Host, "/run")[0] + } + + options.Args = append([]string{uri.User.String() + "@" + uri.Hostname()}, options.Args...) + conf, err := config.Default() + if err != nil { + return nil, err + } + + args := []string{} + if len(dst.Identity) > 0 { + args = append(args, "-i", dst.Identity) + } + if len(conf.Engine.SSHConfig) > 0 { + args = append(args, "-F", conf.Engine.SSHConfig) + } + args = append(args, options.Args...) + info := exec.Command(ssh, args...) + info.Stdout = output + info.Stderr = errors + err = info.Run() + if err != nil { + return nil, err + } + return &ConnectionExecReport{Response: output.String()}, nil +} + +func nativeConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, error) { + host, remotePath, localPath, swap, err := ParseScpArgs(options) + if err != nil { + return nil, err + } + dst, uri, err := Validate(options.User, host, options.Port, options.Identity) + if err != nil { + return nil, err + } + + scp, err := exec.LookPath("scp") + if err != nil { + return nil, fmt.Errorf("no scp binary found") + } + + conf, err := config.Default() + if err != nil { + return nil, err + } + + args := []string{} + if len(dst.Identity) > 0 { + args = append(args, "-i", dst.Identity) + } + if len(conf.Engine.SSHConfig) > 0 { + args = append(args, "-F", conf.Engine.SSHConfig) + } + + userString := "" + if !strings.Contains(host, "@") { + userString = uri.User.String() + "@" + } + // meaning, we are copying from a remote host + if swap { + args = append(args, userString+host+":"+remotePath, localPath) + } else { + args = append(args, localPath, userString+host+":"+remotePath) + } + + info := exec.Command(scp, args...) 
+ err = info.Run() + if err != nil { + return nil, err + } + + return &ConnectionScpReport{Response: remotePath}, nil +} diff --git a/vendor/github.com/containers/common/pkg/ssh/ssh.go b/vendor/github.com/containers/common/pkg/ssh/ssh.go new file mode 100644 index 00000000000..d638d69ad9f --- /dev/null +++ b/vendor/github.com/containers/common/pkg/ssh/ssh.go @@ -0,0 +1,59 @@ +package ssh + +import ( + "fmt" + + "golang.org/x/crypto/ssh" +) + +func Create(options *ConnectionCreateOptions, kind EngineMode) error { + if kind == NativeMode { + return nativeConnectionCreate(*options) + } + return golangConnectionCreate(*options) +} + +func Dial(options *ConnectionDialOptions, kind EngineMode) (*ssh.Client, error) { + var rep *ConnectionDialReport + var err error + if kind == NativeMode { + return nil, fmt.Errorf("ssh dial failed: you cannot create a dial-able client with native ssh") + } + rep, err = golangConnectionDial(*options) + if err != nil { + return nil, err + } + return rep.Client, nil +} + +func Exec(options *ConnectionExecOptions, kind EngineMode) (string, error) { + var rep *ConnectionExecReport + var err error + if kind == NativeMode { + rep, err = nativeConnectionExec(*options) + if err != nil { + return "", err + } + } else { + rep, err = golangConnectionExec(*options) + if err != nil { + return "", err + } + } + return rep.Response, nil +} + +func Scp(options *ConnectionScpOptions, kind EngineMode) (string, error) { + var rep *ConnectionScpReport + var err error + if kind == NativeMode { + if rep, err = nativeConnectionScp(*options); err != nil { + return "", err + } + return rep.Response, nil + } + if rep, err = golangConnectionScp(*options); err != nil { + return "", err + } + return rep.Response, nil +} diff --git a/vendor/github.com/containers/common/pkg/ssh/types.go b/vendor/github.com/containers/common/pkg/ssh/types.go new file mode 100644 index 00000000000..16512c43f17 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/ssh/types.go @@ -0,0 +1,224 @@ +package ssh + +import ( + "net/url" + "time" + + "github.com/containers/storage/pkg/idtools" + "golang.org/x/crypto/ssh" +) + +type EngineMode string + +const ( + NativeMode = EngineMode("native") + GolangMode = EngineMode("golang") + InvalidMode = EngineMode("invalid") +) + +type ConnectionCreateOptions struct { + Name string + Path string + User *url.Userinfo + Port int + Identity string + Socket string + Default bool +} + +type ConnectionDialOptions struct { + Host string + Identity string + User *url.Userinfo + Port int + Auth []string + Timeout time.Duration + InsecureIsMachineConnection bool +} + +type ConnectionDialReport struct { + Client *ssh.Client +} + +type ConnectionExecOptions struct { + Host string + Identity string + User *url.Userinfo + Port int + Auth []string + Args []string + Timeout time.Duration +} + +type ConnectionExecReport struct { + Response string +} + +type ConnectionScpOptions struct { + User *url.Userinfo + Source string + Destination string + Identity string + Port int +} + +type ConnectionScpReport struct { + Response string +} + +// Info is the overall struct that describes the host system +// running libpod/podman +type Info struct { + Host *HostInfo `json:"host"` + Store *StoreInfo `json:"store"` + Registries map[string]interface{} `json:"registries"` + Plugins Plugins `json:"plugins"` + Version Version `json:"version"` +} + +// Version is an output struct for API +type Version struct { + APIVersion string + Version string + GoVersion string + GitCommit string + BuiltTime string + 
Built int64 + OsArch string + Os string +} + +// SecurityInfo describes the libpod host +type SecurityInfo struct { + AppArmorEnabled bool `json:"apparmorEnabled"` + DefaultCapabilities string `json:"capabilities"` + Rootless bool `json:"rootless"` + SECCOMPEnabled bool `json:"seccompEnabled"` + SECCOMPProfilePath string `json:"seccompProfilePath"` + SELinuxEnabled bool `json:"selinuxEnabled"` +} + +// HostInfo describes the libpod host +type HostInfo struct { + Arch string `json:"arch"` + BuildahVersion string `json:"buildahVersion"` + CgroupManager string `json:"cgroupManager"` + CgroupsVersion string `json:"cgroupVersion"` + CgroupControllers []string `json:"cgroupControllers"` + Conmon *ConmonInfo `json:"conmon"` + CPUs int `json:"cpus"` + CPUUtilization *CPUUsage `json:"cpuUtilization"` + Distribution DistributionInfo `json:"distribution"` + EventLogger string `json:"eventLogger"` + Hostname string `json:"hostname"` + IDMappings IDMappings `json:"idMappings,omitempty"` + Kernel string `json:"kernel"` + LogDriver string `json:"logDriver"` + MemFree int64 `json:"memFree"` + MemTotal int64 `json:"memTotal"` + NetworkBackend string `json:"networkBackend"` + OCIRuntime *OCIRuntimeInfo `json:"ociRuntime"` + OS string `json:"os"` + // RemoteSocket returns the UNIX domain socket the Podman service is listening on + RemoteSocket *RemoteSocket `json:"remoteSocket,omitempty"` + RuntimeInfo map[string]interface{} `json:"runtimeInfo,omitempty"` + // ServiceIsRemote is true when the podman/libpod service is remote to the client + ServiceIsRemote bool `json:"serviceIsRemote"` + Security SecurityInfo `json:"security"` + Slirp4NetNS SlirpInfo `json:"slirp4netns,omitempty"` + SwapFree int64 `json:"swapFree"` + SwapTotal int64 `json:"swapTotal"` + Uptime string `json:"uptime"` + Linkmode string `json:"linkmode"` +} + +// RemoteSocket describes information about the API socket +type RemoteSocket struct { + Path string `json:"path,omitempty"` + Exists bool `json:"exists,omitempty"` +} + +// SlirpInfo describes the slirp executable that is being used +type SlirpInfo struct { + Executable string `json:"executable"` + Package string `json:"package"` + Version string `json:"version"` +} + +// IDMappings describe the GID and UID mappings +type IDMappings struct { + GIDMap []idtools.IDMap `json:"gidmap"` + UIDMap []idtools.IDMap `json:"uidmap"` +} + +// DistributionInfo describes the host distribution for libpod +type DistributionInfo struct { + Distribution string `json:"distribution"` + Variant string `json:"variant,omitempty"` + Version string `json:"version"` + Codename string `json:"codename,omitempty"` +} + +// ConmonInfo describes the conmon executable being used +type ConmonInfo struct { + Package string `json:"package"` + Path string `json:"path"` + Version string `json:"version"` +} + +// OCIRuntimeInfo describes the runtime (crun or runc) being +// used with podman +type OCIRuntimeInfo struct { + Name string `json:"name"` + Package string `json:"package"` + Path string `json:"path"` + Version string `json:"version"` +} + +// StoreInfo describes the container storage and its +// attributes +type StoreInfo struct { + ConfigFile string `json:"configFile"` + ContainerStore ContainerStore `json:"containerStore"` + GraphDriverName string `json:"graphDriverName"` + GraphOptions map[string]interface{} `json:"graphOptions"` + GraphRoot string `json:"graphRoot"` + // GraphRootAllocated is how much space the graphroot has in bytes + GraphRootAllocated uint64 `json:"graphRootAllocated"` + // GraphRootUsed is how 
much of graphroot is used in bytes + GraphRootUsed uint64 `json:"graphRootUsed"` + GraphStatus map[string]string `json:"graphStatus"` + ImageCopyTmpDir string `json:"imageCopyTmpDir"` + ImageStore ImageStore `json:"imageStore"` + RunRoot string `json:"runRoot"` + VolumePath string `json:"volumePath"` +} + +// ImageStore describes the image store. Right now only the number +// of images present +type ImageStore struct { + Number int `json:"number"` +} + +// ContainerStore describes the quantity of containers in the +// store by status +type ContainerStore struct { + Number int `json:"number"` + Paused int `json:"paused"` + Running int `json:"running"` + Stopped int `json:"stopped"` +} + +type Plugins struct { + Volume []string `json:"volume"` + Network []string `json:"network"` + Log []string `json:"log"` + // Authorization is provided for compatibility, will always be nil as Podman has no daemon + Authorization []string `json:"authorization"` +} + +type CPUUsage struct { + UserPercent float64 `json:"userPercent"` + SystemPercent float64 `json:"systemPercent"` + IdlePercent float64 `json:"idlePercent"` +} diff --git a/vendor/github.com/containers/common/pkg/ssh/utils.go b/vendor/github.com/containers/common/pkg/ssh/utils.go new file mode 100644 index 00000000000..d2b7d4a0289 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/ssh/utils.go @@ -0,0 +1,198 @@ +package ssh + +import ( + "fmt" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" + + "github.com/containers/common/pkg/config" + "golang.org/x/crypto/ssh" + "golang.org/x/term" +) + +func Validate(user *url.Userinfo, path string, port int, identity string) (*config.Destination, *url.URL, error) { + sock := "" + if strings.Contains(path, "/run") { + sock = strings.Split(path, "/run")[1] + } + // url.Parse NEEDS ssh://, if this ever fails or returns some nonsense, that is why. + uri, err := url.Parse(path) + if err != nil { + return nil, nil, err + } + + // sometimes we are not going to have a path, this breaks uri.Hostname() + if uri.Host == "" && strings.Contains(uri.String(), "@") { + uri.Host = strings.Split(uri.String(), "@")[1] + } + + if uri.Port() == "" { + if port != 0 { + uri.Host = net.JoinHostPort(uri.Host, strconv.Itoa(port)) + } else { + uri.Host = net.JoinHostPort(uri.Host, "22") + } + } + + if user != nil { + uri.User = user + } + + uriStr := "" + if len(sock) > 0 { + uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host + "/run" + sock + } else { + uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host + } + + dst := config.Destination{ + URI: uriStr, + } + + if len(identity) > 0 { + dst.Identity = identity + } + return &dst, uri, err +} + +var ( + passPhrase []byte + phraseSync sync.Once + password []byte + passwordSync sync.Once +) + +// ReadPassword prompts for a secret and returns value input by user from stdin +// Unlike terminal.ReadPassword(), $(echo $SECRET | podman...) is supported. +// Additionally, all input after `/n` is queued to podman command. 
+func ReadPassword(prompt string) (pw []byte, err error) { + fd := int(os.Stdin.Fd()) + if term.IsTerminal(fd) { + fmt.Fprint(os.Stderr, prompt) + pw, err = term.ReadPassword(fd) + fmt.Fprintln(os.Stderr) + return + } + + var b [1]byte + for { + n, err := os.Stdin.Read(b[:]) + // terminal.ReadPassword discards any '\r', so we do the same + if n > 0 && b[0] != '\r' { + if b[0] == '\n' { + return pw, nil + } + pw = append(pw, b[0]) + // limit size, so that a wrong input won't fill up the memory + if len(pw) > 1024 { + err = fmt.Errorf("password too long, 1024 byte limit") + } + } + if err != nil { + // terminal.ReadPassword accepts EOF-terminated passwords + // if non-empty, so we do the same + if err == io.EOF && len(pw) > 0 { + err = nil + } + return pw, err + } + } +} + +func PublicKey(path string, passphrase []byte) (ssh.Signer, error) { + key, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + if _, ok := err.(*ssh.PassphraseMissingError); !ok { + return nil, err + } + if len(passphrase) == 0 { + passphrase = ReadPassphrase() + } + return ssh.ParsePrivateKeyWithPassphrase(key, passphrase) + } + return signer, nil +} + +func ReadPassphrase() []byte { + phraseSync.Do(func() { + secret, err := ReadPassword("Key Passphrase: ") + if err != nil { + secret = []byte{} + } + passPhrase = secret + }) + return passPhrase +} + +func ReadLogin() []byte { + passwordSync.Do(func() { + secret, err := ReadPassword("Login password: ") + if err != nil { + secret = []byte{} + } + password = secret + }) + return password +} + +func ParseScpArgs(options ConnectionScpOptions) (string, string, string, bool, error) { + // assume load to remote + host := options.Destination + if strings.Contains(host, "ssh://") { + host = strings.Split(host, "ssh://")[1] + } + localPath := options.Source + if strings.Contains(localPath, "ssh://") { + localPath = strings.Split(localPath, "ssh://")[1] + } + remotePath := "" + swap := false + if split := strings.Split(localPath, ":"); len(split) == 2 { + // save to remote, load to local + host = split[0] + remotePath = split[1] + localPath = options.Destination + swap = true + } else { + split = strings.Split(host, ":") + if len(split) != 2 { + return "", "", "", false, fmt.Errorf("no remote destination provided") + } + host = split[0] + remotePath = split[1] + } + remotePath = strings.TrimSuffix(remotePath, "\n") + return host, remotePath, localPath, swap, nil +} + +func DialNet(sshClient *ssh.Client, mode string, url *url.URL) (net.Conn, error) { + port, err := strconv.Atoi(url.Port()) + if err != nil { + return nil, err + } + if _, _, err = Validate(url.User, url.Hostname(), port, ""); err != nil { + return nil, err + } + return sshClient.Dial(mode, url.Path) +} + +func DefineMode(flag string) EngineMode { + switch flag { + case "native": + return NativeMode + case "golang": + return GolangMode + default: + return InvalidMode + } +} diff --git a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go index 84201c99874..58c9af65419 100644 --- a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go +++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go @@ -58,7 +58,7 @@ func Reference(ref types.ImageReference, supplemental []types.ImageReference, mu func (s *supplementedImageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { src, err := 
s.NewImageSource(ctx, sys) if err != nil { - return nil, fmt.Errorf("error building a new Image using an ImageSource: %w", err) + return nil, fmt.Errorf("building a new Image using an ImageSource: %w", err) } return image.FromSource(ctx, sys, src) } @@ -75,7 +75,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty // Open the default instance for reading. top, err := s.ImageReference.NewImageSource(ctx, sys) if err != nil { - return nil, fmt.Errorf("error opening %q as image source: %w", transports.ImageName(s.ImageReference), err) + return nil, fmt.Errorf("opening %q as image source: %w", transports.ImageName(s.ImageReference), err) } defer func() { @@ -105,14 +105,14 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty // Mark this instance as being associated with this ImageSource. manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { - return fmt.Errorf("error computing digest over manifest %q: %w", string(manifestBytes), err) + return fmt.Errorf("computing digest over manifest %q: %w", string(manifestBytes), err) } sources[manifestDigest] = src // Parse the manifest as a single image. man, err := manifest.FromBlob(manifestBytes, manifestType) if err != nil { - return fmt.Errorf("error parsing manifest %q: %w", string(manifestBytes), err) + return fmt.Errorf("parsing manifest %q: %w", string(manifestBytes), err) } // Log the config blob's digest and the blobs of its layers as associated with this manifest. @@ -135,14 +135,14 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty // Mark this instance as being associated with this ImageSource. manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { - return fmt.Errorf("error computing manifest digest: %w", err) + return fmt.Errorf("computing manifest digest: %w", err) } sources[manifestDigest] = src // Parse the manifest as a list of images. list, err := manifest.ListFromBlob(manifestBytes, manifestType) if err != nil { - return fmt.Errorf("error parsing manifest blob %q as a %q: %w", string(manifestBytes), manifestType, err) + return fmt.Errorf("parsing manifest blob %q as a %q: %w", string(manifestBytes), manifestType, err) } // Figure out which of its instances we want to look at. @@ -151,7 +151,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty case cp.CopySystemImage: instance, err := list.ChooseInstance(sys) if err != nil { - return fmt.Errorf("error selecting appropriate instance from list: %w", err) + return fmt.Errorf("selecting appropriate instance from list: %w", err) } chaseInstances = []digest.Digest{instance} case cp.CopySpecificImages: @@ -194,14 +194,14 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty } else { src, err = ref.NewImageSource(ctx, sys) if err != nil { - return nil, fmt.Errorf("error opening %q as image source: %w", transports.ImageName(ref), err) + return nil, fmt.Errorf("opening %q as image source: %w", transports.ImageName(ref), err) } } // Read the default manifest for the image. manifestBytes, manifestType, err := src.GetManifest(ctx, nil) if err != nil { - return nil, fmt.Errorf("error reading default manifest from image %q: %w", transports.ImageName(ref), err) + return nil, fmt.Errorf("reading default manifest from image %q: %w", transports.ImageName(ref), err) } // If this is the first image, mark it as our starting point. 
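// Editor's note (illustrative, not part of the vendored change): the hunks in
// this file drop the redundant "error " prefix from wrapped errors, since a
// %w chain already reads naturally when printed; with made-up values,
//   fmt.Errorf("opening %q as image source: %w", name, err)
// renders as
//   opening "dir:/tmp/img" as image source: no such file or directory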
@@ -223,18 +223,18 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty // Record the digest of the ImageSource's default instance's manifest. manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { - return nil, fmt.Errorf("error computing digest of manifest from image %q: %w", transports.ImageName(ref), err) + return nil, fmt.Errorf("computing digest of manifest from image %q: %w", transports.ImageName(ref), err) } sis.sourceDefaultInstances[src] = manifestDigest // If the ImageSource's default manifest is a list, parse each of its instances. if manifest.MIMETypeIsMultiImage(manifestType) { if err = addMulti(manifestBytes, manifestType, src); err != nil { - return nil, fmt.Errorf("error adding multi-image %q: %w", transports.ImageName(ref), err) + return nil, fmt.Errorf("adding multi-image %q: %w", transports.ImageName(ref), err) } } else { if err = addSingle(manifestBytes, manifestType, src); err != nil { - return nil, fmt.Errorf("error adding single image %q: %w", transports.ImageName(ref), err) + return nil, fmt.Errorf("adding single image %q: %w", transports.ImageName(ref), err) } } } @@ -261,18 +261,18 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty // Trust that we either don't need it, or that it's in another reference. // continue // } - return nil, fmt.Errorf("error reading manifest for instance %q: %w", manifestToRead.instance, err) + return nil, fmt.Errorf("reading manifest for instance %q: %w", manifestToRead.instance, err) } if manifest.MIMETypeIsMultiImage(manifestType) { // Add the list's contents. if err = addMulti(manifestBytes, manifestType, manifestToRead.src); err != nil { - return nil, fmt.Errorf("error adding single image instance %q: %w", manifestToRead.instance, err) + return nil, fmt.Errorf("adding single image instance %q: %w", manifestToRead.instance, err) } } else { // Add the single image's contents. 
if err = addSingle(manifestBytes, manifestType, manifestToRead.src); err != nil { - return nil, fmt.Errorf("error adding single image instance %q: %w", manifestToRead.instance, err) + return nil, fmt.Errorf("adding single image instance %q: %w", manifestToRead.instance, err) } } } @@ -313,17 +313,17 @@ func (s *supplementedImageSource) GetManifest(ctx context.Context, instanceDiges } return sourceInstance.GetManifest(ctx, requestInstanceDigest) } - return nil, "", fmt.Errorf("error getting manifest for digest %q: %w", *instanceDigest, ErrDigestNotFound) + return nil, "", fmt.Errorf("getting manifest for digest %q: %w", *instanceDigest, ErrDigestNotFound) } func (s *supplementedImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, bic types.BlobInfoCache) (io.ReadCloser, int64, error) { sourceInstance, ok := s.instancesByBlobDigest[blob.Digest] if !ok { - return nil, -1, fmt.Errorf("error blob %q in known instances: %w", blob.Digest, ErrBlobNotFound) + return nil, -1, fmt.Errorf("blob %q in known instances: %w", blob.Digest, ErrBlobNotFound) } src, ok := s.sourceInstancesByInstance[sourceInstance] if !ok { - return nil, -1, fmt.Errorf("error getting image source for instance %q: %w", sourceInstance, ErrDigestNotFound) + return nil, -1, fmt.Errorf("getting image source for instance %q: %w", sourceInstance, ErrDigestNotFound) } return src.GetBlob(ctx, blob, bic) } @@ -364,7 +364,7 @@ func (s *supplementedImageSource) GetSignatures(ctx context.Context, instanceDig if src != nil { return src.GetSignatures(ctx, requestInstanceDigest) } - return nil, fmt.Errorf("error finding instance for instance digest %q to read signatures: %w", digest, ErrDigestNotFound) + return nil, fmt.Errorf("finding instance for instance digest %q to read signatures: %w", digest, ErrDigestNotFound) } func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { @@ -387,7 +387,7 @@ func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanc if src != nil { blobInfos, err := src.LayerInfosForCopy(ctx, requestInstanceDigest) if err != nil { - return nil, fmt.Errorf("error reading layer infos for copy from instance %q: %w", instanceDigest, err) + return nil, fmt.Errorf("reading layer infos for copy from instance %q: %w", instanceDigest, err) } var manifestDigest digest.Digest if instanceDigest != nil { @@ -398,5 +398,5 @@ func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanc } return blobInfos, nil } - return nil, fmt.Errorf("error finding instance for instance digest %q to copy layers: %w", errMsgDigest, ErrDigestNotFound) + return nil, fmt.Errorf("finding instance for instance digest %q to copy layers: %w", errMsgDigest, ErrDigestNotFound) } diff --git a/vendor/github.com/containers/common/pkg/timetype/timestamp.go b/vendor/github.com/containers/common/pkg/timetype/timestamp.go index 3cbfe40980b..519884c553f 100644 --- a/vendor/github.com/containers/common/pkg/timetype/timestamp.go +++ b/vendor/github.com/containers/common/pkg/timetype/timestamp.go @@ -103,8 +103,10 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // if the incoming nanosecond portion is longer or shorter than 9 digits it is // converted to nanoseconds. The expectation is that the seconds and // nanoseconds will be used to create a time variable.
For example: -// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) -// if err == nil since := time.Unix(seconds, nanoseconds) +// +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// +// returns seconds as def(aultSeconds) if value == "" func ParseTimestamps(value string, def int64) (secs, nanoSecs int64, err error) { if value == "" { diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go index 6d7060af4a6..0cd53af53f5 100644 --- a/vendor/github.com/containers/common/pkg/util/util_supported.go +++ b/vendor/github.com/containers/common/pkg/util/util_supported.go @@ -11,6 +11,7 @@ import ( "sync" "syscall" + "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" ) @@ -31,7 +32,10 @@ func GetRuntimeDir() (string, error) { var rootlessRuntimeDirError error rootlessRuntimeDirOnce.Do(func() { - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + runtimeDir, err := homedir.GetRuntimeDir() + if err != nil { + logrus.Debug(err) + } if runtimeDir != "" { st, err := os.Stat(runtimeDir) if err != nil { diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index e398bac1dea..44362e6af05 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.49.1" +const Version = "0.51.0" diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index ff0e7945dd7..5286975b620 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -238,7 +238,7 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom } // recordValidatedDigestData updates b.blobInfoCache with data about the created uploadedInfo and the original srcInfo. -// This must ONLY be called if all data has been validated by OUR code, and is not comming from third parties. +// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties. func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo, encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error { // Don’t record any associations that involve encrypted data.
This is a bit crude, diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 6758d4de133..26521fe090b 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -24,6 +24,7 @@ import ( "github.com/containers/image/v5/pkg/compression" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/signature/signer" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" @@ -61,6 +62,7 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. +// The owner must call close() when done. type copier struct { dest private.ImageDestination rawSource private.ImageSource @@ -75,6 +77,8 @@ type copier struct { ociEncryptConfig *encconfig.EncryptConfig concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs downloadForeignLayers bool + signers []*signer.Signer // Signers to use to create new signatures for the image + signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed. } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -121,17 +125,21 @@ type ImageListSelection int // Options allows supplying non-default configuration modifying the behavior of CopyImage. type Options struct { - RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. + RemoveSignatures bool // Remove any pre-existing signatures. Signers and SignBy… will still add a new signature. + // Signers to use to add signatures during the copy. + // Callers are still responsible for closing these Signer objects; they can be reused for multiple copy.Image operations in a row. + Signers []*signer.Signer SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), - SignPassphrase string // Passphare to use when signing with the key ID from `SignBy`. + SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`. SignBySigstorePrivateKeyFile string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path. - SignSigstorePrivateKeyPassphrase []byte // Passphare to use when signing with `SignBySigstorePrivateKeyFile`. + SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`. SignIdentity reference.Named // Identity to use when signing, defaults to the docker reference of the destination - ReportWriter io.Writer - SourceCtx *types.SystemContext - DestinationCtx *types.SystemContext - ProgressInterval time.Duration // time to wait between reports to signal the progress channel - Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+ + ReportWriter io.Writer + SourceCtx *types.SystemContext + DestinationCtx *types.SystemContext + ProgressInterval time.Duration // time to wait between reports to signal the progress channel + Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. // Preserve digests, and fail if we cannot. PreserveDigests bool @@ -257,6 +265,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, ociEncryptConfig: options.OciEncryptConfig, downloadForeignLayers: options.DownloadForeignLayers, } + defer c.close() // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel. if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() { @@ -284,6 +293,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, c.compressionLevel = options.DestinationCtx.CompressionLevel } + if err := c.setupSigners(options); err != nil { + return nil, err + } + unparsedToplevel := image.UnparsedInstance(rawSource, nil) multiImage, err := isMultiImage(ctx, unparsedToplevel) if err != nil { @@ -340,6 +353,15 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, return copiedManifest, nil } +// close tears down state owned by copier. +func (c *copier) close() { + for i, s := range c.signersToClose { + if err := s.Close(); err != nil { + logrus.Warnf("Error closing per-copy signer %d: %v", i+1, err) + } + } +} + // Checks if the destination supports accepting multiple images by checking if it can support // manifest types that are lists of other manifests. func supportsMultipleImages(dest types.ImageDestination) bool { @@ -564,20 +586,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur } // Sign the manifest list. - if options.SignBy != "" { - newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase, options.SignIdentity) - if err != nil { - return nil, err - } - sigs = append(sigs, newSig) - } - if options.SignBySigstorePrivateKeyFile != "" { - newSig, err := c.createSigstoreSignature(manifestList, options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase, options.SignIdentity) - if err != nil { - return nil, err - } - sigs = append(sigs, newSig) + newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity) + if err != nil { + return nil, err } + sigs = append(sigs, newSigs...) c.Printf("Storing list signatures\n") if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { @@ -675,12 +688,12 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // Decide whether we can substitute blobs with semantic equivalents: // - Don’t do that if we can’t modify the manifest at all // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. - // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: + // This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path: // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, // and we would reuse and sign it. 
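// Editor's sketch (illustrative, not part of the vendored change): with the
// new API a caller creates signers itself, passes them via Options.Signers,
// and keeps ownership, instead of (or in addition to) the SignBy… string
// fields. The key path, passphrase, and image names below are made up.
package main

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/signature/signer"
	"github.com/containers/image/v5/signature/sigstore"
	"github.com/containers/image/v5/transports/alltransports"
)

func copyAndSign(ctx context.Context, policy *signature.PolicyContext) error {
	// Create the signer once; it can be reused for several copy.Image calls.
	s, err := sigstore.NewSigner(
		sigstore.WithPrivateKeyFile("/path/to/cosign.key", []byte("passphrase")),
	)
	if err != nil {
		return err
	}
	defer s.Close() // the copier only closes signers it created itself

	srcRef, err := alltransports.ParseImageName("docker://registry.example/app:latest")
	if err != nil {
		return err
	}
	destRef, err := alltransports.ParseImageName("docker://registry.example/app:signed")
	if err != nil {
		return err
	}
	_, err = copy.Image(ctx, policy, destRef, srcRef, &copy.Options{
		Signers: []*signer.Signer{s},
	})
	return err
}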
- ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" && options.SignBySigstorePrivateKeyFile == "" + ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0 if err := ic.updateEmbeddedDockerReference(); err != nil { return nil, "", "", err @@ -711,7 +724,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal if options.OptimizeDestinationImageAlreadyExists { - shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" || options.SignBySigstorePrivateKeyFile != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible + shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible noPendingManifestUpdates := ic.noPendingManifestUpdates() logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates) @@ -791,20 +804,11 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli targetInstance = &retManifestDigest } - if options.SignBy != "" { - newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase, options.SignIdentity) - if err != nil { - return nil, "", "", err - } - sigs = append(sigs, newSig) - } - if options.SignBySigstorePrivateKeyFile != "" { - newSig, err := c.createSigstoreSignature(manifestBytes, options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase, options.SignIdentity) - if err != nil { - return nil, "", "", err - } - sigs = append(sigs, newSig) + newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity) + if err != nil { + return nil, "", "", err } + sigs = append(sigs, newSigs...) c.Printf("Storing signatures\n") if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go index 6c3d9d62cad..fd19e18cd5d 100644 --- a/vendor/github.com/containers/image/v5/copy/sign.go +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -7,11 +7,49 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/private" internalsig "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/signature" + internalSigner "github.com/containers/image/v5/internal/signer" "github.com/containers/image/v5/signature/sigstore" + "github.com/containers/image/v5/signature/simplesigning" "github.com/containers/image/v5/transports" ) +// setupSigners initializes c.signers based on options. +func (c *copier) setupSigners(options *Options) error { + c.signers = append(c.signers, options.Signers...) + // c.signersToClose is intentionally not updated with options.Signers. + + // We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need + // to clean up any created signers on failure. + + if options.SignBy != "" { + opts := []simplesigning.Option{ + simplesigning.WithKeyFingerprint(options.SignBy), + } + if options.SignPassphrase != "" { + opts = append(opts, simplesigning.WithPassphrase(options.SignPassphrase)) + } + signer, err := simplesigning.NewSigner(opts...) 
+ if err != nil { + return err + } + c.signers = append(c.signers, signer) + c.signersToClose = append(c.signersToClose, signer) + } + + if options.SignBySigstorePrivateKeyFile != "" { + signer, err := sigstore.NewSigner( + sigstore.WithPrivateKeyFile(options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase), + ) + if err != nil { + return err + } + c.signers = append(c.signers, signer) + c.signersToClose = append(c.signersToClose, signer) + } + + return nil +} + // sourceSignatures returns signatures from unparsedSource based on options, // and verifies that they can be used (to avoid copying a large image when we // can tell in advance that it would ultimately fail) @@ -37,20 +75,16 @@ func (c *copier) sourceSignatures(ctx context.Context, unparsed private.Unparsed return sigs, nil } -// createSignature creates a new signature of manifest using keyIdentity. -func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) (internalsig.Signature, error) { - mech, err := signature.NewGPGSigningMechanism() - if err != nil { - return nil, fmt.Errorf("initializing GPG: %w", err) - } - defer mech.Close() - if err := mech.SupportsSigning(); err != nil { - return nil, fmt.Errorf("Signing not supported: %w", err) +// createSignatures creates signatures for manifest and an optional identity. +func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity reference.Named) ([]internalsig.Signature, error) { + if len(c.signers) == 0 { + // We must exit early here, otherwise copies with no Docker reference wouldn’t be possible. + return nil, nil } if identity != nil { if reference.IsNameOnly(identity) { - return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity) + return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String()) } } else { identity = c.dest.Reference().DockerReference() @@ -59,31 +93,23 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase } } - c.Printf("Signing manifest using simple signing\n") - newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase}) - if err != nil { - return nil, fmt.Errorf("creating signature: %w", err) - } - return internalsig.SimpleSigningFromBlob(newSig), nil -} - -// createSigstoreSignature creates a new sigstore signature of manifest using privateKeyFile and identity. 
-func (c *copier) createSigstoreSignature(manifest []byte, privateKeyFile string, passphrase []byte, identity reference.Named) (internalsig.Signature, error) { - if identity != nil { - if reference.IsNameOnly(identity) { - return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String()) + res := make([]internalsig.Signature, 0, len(c.signers)) + for signerIndex, signer := range c.signers { + msg := internalSigner.ProgressMessage(signer) + if len(c.signers) == 1 { + c.Printf("Creating signature: %s\n", msg) + } else { + c.Printf("Creating signature %d: %s\n", signerIndex+1, msg) } - } else { - identity = c.dest.Reference().DockerReference() - if identity == nil { - return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + newSig, err := internalSigner.SignImageManifest(ctx, signer, manifest, identity) + if err != nil { + if len(c.signers) == 1 { + return nil, fmt.Errorf("creating signature: %w", err) + } else { + return nil, fmt.Errorf("creating signature %d: %w", signerIndex, err) + } } + res = append(res, newSig) } - - c.Printf("Signing manifest using a sigstore signature\n") - newSig, err := sigstore.SignDockerManifestWithPrivateKeyFileUnstable(manifest, identity, privateKeyFile, passphrase) - if err != nil { - return nil, fmt.Errorf("creating signature: %w", err) - } - return newSig, nil + return res, nil } diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 47a5c17cd55..55b29fe17a7 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -105,7 +105,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (private.Im AcceptsForeignLayerURLs: false, MustMatchRuntimeOS: false, IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil. 
- HasThreadSafePutBlob: false, + HasThreadSafePutBlob: true, }), NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go index 60521662ed9..632ee7c4936 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/dest.go +++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go @@ -3,7 +3,6 @@ package archive import ( "context" "fmt" - "io" "github.com/containers/image/v5/docker/internal/tarfile" "github.com/containers/image/v5/internal/private" @@ -13,8 +12,8 @@ import ( type archiveImageDestination struct { *tarfile.Destination // Implements most of types.ImageDestination ref archiveReference - archive *tarfile.Writer // Should only be closed if writer != nil - writer io.Closer // May be nil if the archive is shared + writer *Writer // Should be closed if closeWriter + closeWriter bool } func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) { @@ -22,29 +21,28 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (privat return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex) } - var archive *tarfile.Writer - var writer io.Closer - if ref.archiveWriter != nil { - archive = ref.archiveWriter - writer = nil + var writer *Writer + var closeWriter bool + if ref.writer != nil { + writer = ref.writer + closeWriter = false } else { - fh, err := openArchiveForWriting(ref.path) + w, err := NewWriter(sys, ref.path) if err != nil { return nil, err } - - archive = tarfile.NewWriter(fh) - writer = fh + writer = w + closeWriter = true } - tarDest := tarfile.NewDestination(sys, archive, ref.Transport().Name(), ref.ref) + tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref) if sys != nil && sys.DockerArchiveAdditionalTags != nil { tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) } return &archiveImageDestination{ Destination: tarDest, ref: ref, - archive: archive, writer: writer, + closeWriter: closeWriter, }, nil } @@ -56,7 +54,7 @@ func (d *archiveImageDestination) Reference() types.ImageReference { // Close removes resources associated with an initialized ImageDestination, if any. func (d *archiveImageDestination) Close() error { - if d.writer != nil { + if d.closeWriter { return d.writer.Close() } return nil @@ -70,8 +68,15 @@ func (d *archiveImageDestination) Close() error { // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - if d.writer != nil { - return d.archive.Close() + d.writer.imageCommitted() + if d.closeWriter { + // We could do this only in .Close(), but failures in .Close() are much more likely to be + // ignored by callers that use defer. So, in single-image destinations, try to complete + // the archive here. + // But if Commit() is never called, let .Close() clean up. 
+ err := d.writer.Close() + d.closeWriter = false + return err } return nil } diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go index 9044b340b7d..304a8c618ff 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/transport.go +++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go @@ -53,7 +53,7 @@ type archiveReference struct { // file, not necessarily path precisely). archiveReader *tarfile.Reader // If not nil, must have been created for path - archiveWriter *tarfile.Writer + writer *Writer } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference. @@ -108,7 +108,7 @@ func NewIndexReference(path string, sourceIndex int) (types.ImageReference, erro // newReference returns a docker archive reference for a path, an optional reference or sourceIndex, // and optionally a tarfile.Reader and/or a Writer matching path. func newReference(path string, ref reference.NamedTagged, sourceIndex int, - archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) { + archiveReader *tarfile.Reader, writer *Writer) (types.ImageReference, error) { if strings.Contains(path, ":") { return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path) } @@ -126,7 +126,7 @@ func newReference(path string, ref reference.NamedTagged, sourceIndex int, ref: ref, sourceIndex: sourceIndex, archiveReader: archiveReader, - archiveWriter: archiveWriter, + writer: writer, }, nil } diff --git a/vendor/github.com/containers/image/v5/docker/archive/writer.go b/vendor/github.com/containers/image/v5/docker/archive/writer.go index 2d8fafe2922..315c282ca5f 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/writer.go +++ b/vendor/github.com/containers/image/v5/docker/archive/writer.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "sync" "github.com/containers/image/v5/docker/internal/tarfile" "github.com/containers/image/v5/docker/reference" @@ -13,47 +14,19 @@ import ( // Writer manages a single in-progress Docker archive and allows adding images to it. type Writer struct { - path string // The original, user-specified path; not the maintained temporary file, if any - archive *tarfile.Writer - writer io.Closer + path string // The original, user-specified path; not the maintained temporary file, if any + regularFile bool // path refers to a regular file (e.g. not a pipe) + archive *tarfile.Writer + writer io.Closer + + // The following state can only be accessed with the mutex held. + mutex sync.Mutex + hadCommit bool // At least one successful commit has happened } // NewWriter returns a Writer for path. // The caller should call .Close() on the returned object. func NewWriter(sys *types.SystemContext, path string) (*Writer, error) { - fh, err := openArchiveForWriting(path) - if err != nil { - return nil, err - } - archive := tarfile.NewWriter(fh) - - return &Writer{ - path: path, - archive: archive, - writer: fh, - }, nil -} - -// Close writes all outstanding data about images to the archive, and -// releases state associated with the Writer, if any. -// No more images can be added after this is called.
-func (w *Writer) Close() error { - err := w.archive.Close() - if err2 := w.writer.Close(); err2 != nil && err == nil { - err = err2 - } - return err -} - -// NewReference returns an ImageReference that allows adding an image to Writer, -// with an optional reference. -func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) { - return newReference(w.path, destinationRef, -1, nil, w.archive) -} - -// openArchiveForWriting opens path for writing a tar archive, -// making a few sanity checks. -func openArchiveForWriting(path string) (*os.File, error) { // path can be either a pipe or a regular file // in the case of a pipe, we require that we can open it for write // in the case of a regular file, we don't want to overwrite any pre-existing file @@ -69,15 +42,62 @@ func openArchiveForWriting(path string) (*os.File, error) { fh.Close() } }() + fhStat, err := fh.Stat() if err != nil { return nil, fmt.Errorf("statting file %q: %w", path, err) } - - if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { + regularFile := fhStat.Mode().IsRegular() + if regularFile && fhStat.Size() != 0 { return nil, errors.New("docker-archive doesn't support modifying existing images") } + archive := tarfile.NewWriter(fh) + succeeded = true - return fh, nil + return &Writer{ + path: path, + regularFile: regularFile, + archive: archive, + writer: fh, + hadCommit: false, + }, nil +} + +// imageCommitted notifies the Writer that at least one image was successfully committed to the stream. +func (w *Writer) imageCommitted() { + w.mutex.Lock() + defer w.mutex.Unlock() + w.hadCommit = true +} + +// Close writes all outstanding data about images to the archive, and +// releases state associated with the Writer, if any. +// No more images can be added after this is called. +func (w *Writer) Close() error { + err := w.archive.Close() + if err2 := w.writer.Close(); err2 != nil && err == nil { + err = err2 + } + if err == nil && w.regularFile && !w.hadCommit { + // Writing to the destination never had a success; delete the destination if we created it. + // This is done primarily because we don’t implement adding another image to a pre-existing image, so if we + // left a partial archive around (notably because reading from the _source_ has failed), we couldn’t retry without + // the caller manually deleting the partial archive. So, delete it instead. + // + // Archives with at least one successfully created image are left around; they might still be valuable. + // + // Note a corner case: If there _originally_ was an empty file (which is not a valid archive anyway), this deletes it. + // Ideally, if w.regularFile, we should write the full contents to a temporary file and use os.Rename here, only on success. + if err2 := os.Remove(w.path); err2 != nil { + err = err2 + } + } + return err +} + +// NewReference returns an ImageReference that allows adding an image to Writer, +// with an optional reference. 
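// Editor's sketch (illustrative, not part of the vendored change): the
// multi-image lifecycle the refactored Writer is built for. The path and tag
// are made-up examples; thanks to the hadCommit tracking, Close() removes a
// regular-file destination again when no image was ever committed to it.
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/types"
)

func archiveDestination(sys *types.SystemContext, tag string) (types.ImageReference, *archive.Writer, error) {
	w, err := archive.NewWriter(sys, "/tmp/images.tar")
	if err != nil {
		return nil, nil, err
	}
	named, err := reference.ParseNormalizedNamed(tag)
	if err != nil {
		w.Close()
		return nil, nil, err
	}
	tagged, ok := named.(reference.NamedTagged)
	if !ok {
		w.Close()
		return nil, nil, fmt.Errorf("%q does not contain a tag", tag)
	}
	// Use the returned reference as the destination of copy.Image(); the
	// caller must call w.Close() after the last image has been copied.
	ref, err := w.NewReference(tagged)
	if err != nil {
		w.Close()
		return nil, nil, err
	}
	return ref, w, nil
}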
+func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) { + return newReference(w.path, destinationRef, -1, nil, w) } diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go new file mode 100644 index 00000000000..f958e80e8ec --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/body_reader.go @@ -0,0 +1,234 @@ +package docker + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net/http" + "net/url" + "strconv" + "strings" + "syscall" + "time" + + "github.com/sirupsen/logrus" ) + +// bodyReaderMinimumProgress is the minimum progress we want to see before we retry +const bodyReaderMinimumProgress = 1 * 1024 * 1024 + +// bodyReader is an io.ReadCloser returned by dockerImageSource.GetBlob, +// which can transparently resume some (very limited) kinds of aborted connections. +type bodyReader struct { + ctx context.Context + c *dockerClient + + path string // path to pass to makeRequest to retry + logURL *url.URL // a URL to use in error messages + body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close. + lastRetryOffset int64 + offset int64 // Current offset within the blob + firstConnectionTime time.Time + lastSuccessTime time.Time // time.Time{} if N/A +} + +// newBodyReader creates a bodyReader for request path in c. +// firstBody is an already correctly opened body for the blob, returning the full blob from the start. +// If reading from firstBody fails, bodyReader may heuristically decide to resume. +func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) { + logURL, err := c.resolveRequestURL(path) + if err != nil { + return nil, err + } + res := &bodyReader{ + ctx: ctx, + c: c, + + path: path, + logURL: logURL, + body: firstBody, + lastRetryOffset: 0, + offset: 0, + firstConnectionTime: time.Now(), + } + return res, nil +} + +// parseDecimalInString ensures that s[start:] starts with a non-negative decimal number, and returns that number and the offset after the number. +func parseDecimalInString(s string, start int) (int64, int, error) { + i := start + for i < len(s) && s[i] >= '0' && s[i] <= '9' { + i++ + } + if i == start { + return -1, -1, errors.New("missing decimal number") + } + v, err := strconv.ParseInt(s[start:i], 10, 64) + if err != nil { + return -1, -1, fmt.Errorf("parsing number: %w", err) + } + return v, i, nil +} + +// parseExpectedChar ensures that s[pos] is the expected byte, and returns the offset after it. +func parseExpectedChar(s string, pos int, expected byte) (int, error) { + if pos == len(s) || s[pos] != expected { + return -1, fmt.Errorf("missing expected %q", expected) + } + return pos + 1, nil +} + +// parseContentRange ensures that res contains a Content-Range header with a byte range, and returns (first, last, completeLength) on success. completeLength can be -1.
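// Editor's note (illustrative worked examples, not part of the vendored
// change) for the parser below, with made-up values:
//   "bytes 1000-9999/10000" -> first=1000, last=9999, completeLength=10000
//   "bytes 1000-9999/*"     -> first=1000, last=9999, completeLength=-1
//   "bytes 1000-9999"       -> error (missing the "/" and complete-length)
// The resume logic in bodyReader.Read only accepts a 206 whose first equals
// the current offset and which, when the length is known, extends to the end
// of the blob.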
+func parseContentRange(res *http.Response) (int64, int64, int64, error) { + hdrs := res.Header.Values("Content-Range") + switch len(hdrs) { + case 0: + return -1, -1, -1, errors.New("missing Content-Range: header") + case 1: + break + default: + return -1, -1, -1, fmt.Errorf("ambiguous Content-Range:, %d header values", len(hdrs)) + } + hdr := hdrs[0] + expectedPrefix := "bytes " + if !strings.HasPrefix(hdr, expectedPrefix) { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, missing prefix %q", hdr, expectedPrefix) + } + first, pos, err := parseDecimalInString(hdr, len(expectedPrefix)) + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing first-pos: %w", hdr, err) + } + pos, err = parseExpectedChar(hdr, pos, '-') + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err) + } + last, pos, err := parseDecimalInString(hdr, pos) + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing last-pos: %w", hdr, err) + } + pos, err = parseExpectedChar(hdr, pos, '/') + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err) + } + completeLength := int64(-1) + if pos < len(hdr) && hdr[pos] == '*' { + pos++ + } else { + completeLength, pos, err = parseDecimalInString(hdr, pos) + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing complete-length: %w", hdr, err) + } + } + if pos < len(hdr) { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, unexpected trailing content", hdr) + } + return first, last, completeLength, nil +} + +// Read implements io.ReadCloser +func (br *bodyReader) Read(p []byte) (int, error) { + if br.body == nil { + return 0, fmt.Errorf("internal error: bodyReader.Read called on a closed object for %s", br.logURL.Redacted()) + } + n, err := br.body.Read(p) + br.offset += int64(n) + switch { + case err == nil || err == io.EOF: + br.lastSuccessTime = time.Now() + return n, err // Unlike the default: case, don’t log anything. + + case errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET): + originalErr := err + redactedURL := br.logURL.Redacted() + if err := br.errorIfNotReconnecting(originalErr, redactedURL); err != nil { + return n, err + } + + if err := br.body.Close(); err != nil { + logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise + } + br.body = nil + time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede + + headers := map[string][]string{ + "Range": {fmt.Sprintf("bytes=%d-", br.offset)}, + } + res, err := br.c.makeRequest(br.ctx, http.MethodGet, br.path, headers, nil, v2Auth, nil) + if err != nil { + return n, fmt.Errorf("%w (while reconnecting: %v)", originalErr, err) + } + consumedBody := false + defer func() { + if !consumedBody { + res.Body.Close() + } + }() + switch res.StatusCode { + case http.StatusPartialContent: // OK + // A client MUST inspect a 206 response's Content-Type and Content-Range field(s) to determine what parts are enclosed and whether additional requests are needed. + // The recipient of an invalid Content-Range MUST NOT attempt to recombine the received content with a stored representation. 
+ first, last, completeLength, err := parseContentRange(res) + if err != nil { + return n, fmt.Errorf("%w (after reconnecting, invalid Content-Range header: %v)", originalErr, err) + } + // We don’t handle responses that start at an unrequested offset, nor responses that terminate before the end of the full blob. + if first != br.offset || (completeLength != -1 && last+1 != completeLength) { + return n, fmt.Errorf("%w (after reconnecting at offset %d, got unexpected Content-Range %d-%d/%d)", originalErr, br.offset, first, last, completeLength) + } + // Continue below + case http.StatusOK: + return n, fmt.Errorf("%w (after reconnecting, server did not process a Range: header, status %d)", originalErr, http.StatusOK) + default: + err := registryHTTPResponseToError(res) + return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err) + } + + logrus.Debugf("Successfully reconnected to %s", redactedURL) + consumedBody = true + br.body = res.Body + br.lastRetryOffset = br.offset + return n, nil + + default: + logrus.Debugf("Error reading blob body from %s: %#v", br.logURL.Redacted(), err) + return n, err + } +} + +// millisecondsSince is like time.Since(tm).Milliseconds, but it returns a floating-point value +func millisecondsSince(tm time.Time) float64 { + return float64(time.Since(tm).Nanoseconds()) / 1_000_000.0 +} + +// errorIfNotReconnecting makes a heuristic decision whether we should reconnect after err at redactedURL; if so, it returns nil, +// otherwise it returns an appropriate error to return to the caller (possibly augmented with data about the heuristic) +func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL string) error { + totalTime := millisecondsSince(br.firstConnectionTime) + failureTime := math.NaN() + if (br.lastSuccessTime != time.Time{}) { + failureTime = millisecondsSince(br.lastSuccessTime) + } + logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: lastRetryOffset %d, offset %d, %.3f ms since first connection, %.3f ms since last progress", + redactedURL, originalErr, br.lastRetryOffset, br.offset, totalTime, failureTime) + progress := br.offset - br.lastRetryOffset + if progress < bodyReaderMinimumProgress { + logrus.Debugf("Not reconnecting to %s because only %d bytes progress made", redactedURL, progress) + return fmt.Errorf("(heuristic tuning data: last retry %d, current offset %d; %.3f ms total, %.3f ms since progress): %w", + br.lastRetryOffset, br.offset, totalTime, failureTime, originalErr) + } + logrus.Infof("Reading blob body from %s failed (%v), reconnecting…", redactedURL, originalErr) + return nil +} + +// Close implements io.ReadCloser +func (br *bodyReader) Close() error { + if br.body == nil { + return nil + } + err := br.body.Close() + br.body = nil + return err +} diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go index 323a02fc095..7d2a98d6844 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/client.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -30,13 +30,13 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { // // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set // TLSClientConfig to nil.
This can be achieved by using the form `http://` - url, err := dockerclient.ParseHostURL(host) + serverURL, err := dockerclient.ParseHostURL(host) if err != nil { return nil, err } var httpClient *http.Client - if url.Scheme != "unix" { - if url.Scheme == "http" { + if serverURL.Scheme != "unix" { + if serverURL.Scheme == "http" { httpClient = httpConfig() } else { hc, err := tlsConfig(sys) diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go index 31ce167f13a..ad218908439 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go @@ -53,7 +53,7 @@ func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { // For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) // For daemonImageDestination, it must be a ref, which is NamedTagged. // (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. -// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) +// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) type daemonReference struct { id digest.Digest ref reference.Named // !reference.IsNameOnly diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go similarity index 62% rename from vendor/github.com/docker/distribution/registry/client/errors.go rename to vendor/github.com/containers/image/v5/docker/distribution_error.go index 024df43dd92..0fe915249b7 100644 --- a/vendor/github.com/docker/distribution/registry/client/errors.go +++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go @@ -1,46 +1,60 @@ -package client +// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go +// Copyright 2022 github.com/distribution/distribution authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package docker import ( "encoding/json" "errors" "fmt" "io" - "io/ioutil" "net/http" "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth/challenge" + dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge" ) -// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty +// errNoErrorsInBody is returned when an HTTP response body parses to an empty // errcode.Errors slice. 
-var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") +var errNoErrorsInBody = errors.New("no error details found in HTTP response body") -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// unexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. -type UnexpectedHTTPStatusError struct { +type unexpectedHTTPStatusError struct { Status string } -func (e *UnexpectedHTTPStatusError) Error() string { +func (e *unexpectedHTTPStatusError) Error() string { return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) } -// UnexpectedHTTPResponseError is returned when an expected HTTP status code +// unexpectedHTTPResponseError is returned when an expected HTTP status code // is returned, but the content was unexpected and failed to be parsed. -type UnexpectedHTTPResponseError struct { +type unexpectedHTTPResponseError struct { ParseErr error StatusCode int Response []byte } -func (e *UnexpectedHTTPResponseError) Error() string { +func (e *unexpectedHTTPResponseError) Error() string { return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) } func parseHTTPErrorResponse(statusCode int, r io.Reader) error { var errors errcode.Errors - body, err := ioutil.ReadAll(r) + body, err := io.ReadAll(r) if err != nil { return err } @@ -55,8 +69,6 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { switch statusCode { case http.StatusUnauthorized: return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusForbidden: - return errcode.ErrorCodeDenied.WithMessage(detailsErr.Details) case http.StatusTooManyRequests: return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) default: @@ -65,7 +77,7 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { } if err := json.Unmarshal(body, &errors); err != nil { - return &UnexpectedHTTPResponseError{ + return &unexpectedHTTPResponseError{ ParseErr: err, StatusCode: statusCode, Response: body, @@ -75,8 +87,8 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { if len(errors) == 0 { // If there was no error specified in the body, return // UnexpectedHTTPResponseError. - return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, + return &unexpectedHTTPResponseError{ + ParseErr: errNoErrorsInBody, StatusCode: statusCode, Response: body, } @@ -96,15 +108,15 @@ func mergeErrors(err1, err2 error) error { return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) } -// HandleErrorResponse returns error parsed from HTTP response for an +// handleErrorResponse returns error parsed from HTTP response for an // unsuccessful HTTP response code (in the range 400 - 499 inclusive). An // UnexpectedHTTPStatusError returned for response code outside of expected // range. 
-func HandleErrorResponse(resp *http.Response) error { +func handleErrorResponse(resp *http.Response) error { if resp.StatusCode >= 400 && resp.StatusCode < 500 { // Check for OAuth errors within the `WWW-Authenticate` header first // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range challenge.ResponseChallenges(resp) { + for _, c := range dockerChallenge.ResponseChallenges(resp) { if c.Scheme == "bearer" { var err errcode.Error // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 @@ -126,16 +138,10 @@ func HandleErrorResponse(resp *http.Response) error { } } err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { + if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } - return &UnexpectedHTTPStatusError{Status: resp.Status} -} - -// SuccessStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). -func SuccessStatus(status int) bool { - return status >= 200 && status <= 399 + return &unexpectedHTTPStatusError{Status: resp.Status} } diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 5de07674089..9e8fece032d 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -1,6 +1,7 @@ package docker import ( + "bytes" "context" "crypto/tls" "encoding/json" @@ -18,16 +19,15 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/useragent" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/docker/config" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/pkg/tlsclientconfig" "github.com/containers/image/v5/types" - "github.com/containers/image/v5/version" "github.com/containers/storage/pkg/homedir" "github.com/docker/distribution/registry/api/errcode" v2 "github.com/docker/distribution/registry/api/v2" - clientLib "github.com/docker/distribution/registry/client" "github.com/docker/go-connections/tlsconfig" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -68,8 +68,6 @@ var ( {path: etcDir + "/containers/certs.d", absolute: true}, {path: etcDir + "/docker/certs.d", absolute: true}, } - - defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)" ) // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: @@ -126,8 +124,9 @@ type dockerClient struct { } type authScope struct { - remoteName string - actions string + resourceType string + remoteName string + actions string } // sendAuth determines whether we need authentication for v2 or v1 endpoint. 
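// Editor's note (illustrative, not part of the vendored change): with the new
// resourceType field, a registry token scope string such as
//   "repository:library/busybox:pull,push"
// corresponds to
//   authScope{resourceType: "repository", remoteName: "library/busybox", actions: "pull,push"}
// which is the shape parseAuthScope rebuilds from WWW-Authenticate challenges
// in the insufficient_scope retry added below.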
@@ -236,6 +235,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, regis } client.signatureBase = sigBase client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref) + client.scope.resourceType = "repository" client.scope.actions = actions client.scope.remoteName = reference.Path(ref.ref) return client, nil @@ -282,7 +282,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc } tlsClientConfig.InsecureSkipVerify = skipVerify - userAgent := defaultUserAgent + userAgent := useragent.DefaultUserAgent if sys != nil && sys.DockerRegistryUserAgent != "" { userAgent = sys.DockerRegistryUserAgent } @@ -312,8 +312,14 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password return err } defer resp.Body.Close() - - return httpResponseToError(resp, "") + if resp.StatusCode != http.StatusOK { + err := registryHTTPResponseToError(resp) + if resp.StatusCode == http.StatusUnauthorized { + err = ErrUnauthorizedForCredentials{Err: err} + } + return err + } + return nil } // SearchResult holds the information of each matching image @@ -410,7 +416,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - err := httpResponseToError(resp, "") + err := registryHTTPResponseToError(resp) logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err) return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err) } @@ -466,12 +472,49 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea return nil, err } + requestURL, err := c.resolveRequestURL(path) + if err != nil { + return nil, err + } + return c.makeRequestToResolvedURL(ctx, method, requestURL, headers, stream, -1, auth, extraScope) +} + +// resolveRequestURL turns a path for c.makeRequest into a full URL. +// Most users should call makeRequest directly; this exists basically to make the URL available for debug logs. +func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) { urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) - url, err := url.Parse(urlString) + res, err := url.Parse(urlString) if err != nil { return nil, err } - return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope) + return res, nil +} + +// Checks if the auth headers in the response contain an indication of a failed +// authorization because of an "insufficient_scope" error. If that's the case, +// returns the required scope to be used for fetching a new token.
+func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) { + if err == nil && res.StatusCode == http.StatusUnauthorized { + challenges := parseAuthHeader(res.Header) + for _, challenge := range challenges { + if challenge.Scheme == "bearer" { + if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" { + if scope, ok := challenge.Parameters["scope"]; ok && scope != "" { + if newScope, err := parseAuthScope(scope); err == nil { + return true, newScope + } else { + logrus.WithFields(logrus.Fields{ + "error": err, + "scope": scope, + "challenge": challenge, + }).Error("Failed to parse the authentication scope from the given challenge") + } + } + } + } + } + return false, nil } // parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it, @@ -487,9 +530,8 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat return time.Duration(num) * time.Second } // Second, check if we have an HTTP date. - // If the delta between the date and now is positive, use it. - // Otherwise, fall back to using the default exponential back off. if t, err := http.ParseTime(after); err == nil { + // If the delta between the date and now is positive, use it. delta := time.Until(t) if delta > 0 { return delta @@ -497,7 +539,6 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat logrus.Debugf("Retry-After date in the past, ignoring it") return fallbackDelay } - // If the header contents are bogus, fall back to using the default exponential back off. logrus.Debugf("Invalid Retry-After format, ignoring it") return fallbackDelay } @@ -507,12 +548,35 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat // makeRequest should generally be preferred. // In case of an HTTP 429 status code in the response, it may automatically retry a few times. // TODO(runcom): too many arguments here, use a struct -func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { +func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, requestURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { delay := backoffInitialDelay attempts := 0 for { - res, err := c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope) + res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope) attempts++ + + // By default we use pre-defined scopes per operation. In + // certain cases this can fail when our authentication is + // insufficient; we might then get an error back with a + // WWW-Authenticate header indicating an insufficient scope. + // + // Check for that and update the client challenges to retry after + // requesting a new token. + // + // We only try this on the first attempt, to not overload an + // already struggling server. + // We also cannot retry with a body (stream != nil) as the stream + // was already read. + if attempts == 1 && stream == nil && auth != noAuth { + if retry, newScope := needsRetryWithUpdatedScope(err, res); retry { + logrus.Debug("Detected insufficient_scope error, will retry request with updated scope") + // Note: This retry ignores extraScope.
That’s, strictly speaking, incorrect, but we don’t currently + // expect the insufficient_scope errors to happen for those callers. If that changes, we can add support + // for more than one extra scope. + res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope) + extraScope = newScope + } + } if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately stream != nil || // We can't retry with a body (which is not restartable in the general case) attempts == backoffNumIterations { @@ -525,14 +589,14 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri if delay > backoffMaxDelay { delay = backoffMaxDelay } - logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url.Redacted(), delay.Seconds()) + logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds()) select { case <-ctx.Done(): return nil, ctx.Err() case <-time.After(delay): // Nothing } - delay = delay * 2 // exponential back off + delay = delay * 2 // If the registry does not specify a delay, back off exponentially. } } @@ -540,8 +604,8 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. // Note that no exponential back off is performed when receiving an http 429 status code. -func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { - req, err := http.NewRequestWithContext(ctx, method, url.String(), stream) +func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, resolvedURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, resolvedURL.String(), stream) if err != nil { return nil, err } @@ -560,7 +624,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method return nil, err } } - logrus.Debugf("%s %s", method, url.Redacted()) + logrus.Debugf("%s %s", method, resolvedURL.Redacted()) res, err := c.client.Do(req) if err != nil { return nil, err @@ -592,8 +656,18 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope cacheKey := "" scopes := []authScope{c.scope} if extraScope != nil { - // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons). - cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions) + // Using ':' as a separator here is unambiguous because getBearerToken below + // uses the same separator when formatting a remote request (and because + // repository names that we create can't contain colons, and extraScope values + // coming from a server come from `parseAuthScope`, which also splits on colons). 
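To make the key shape concrete before the cacheKey construction that follows, here is a small sketch (the repository name is a made-up example). The same three-part string is also exactly what getBearerToken and getBearerTokenOAuth2 send as the "scope" parameter further down in this file:

package main

import "fmt"

func main() {
	// Example triple, as stored in authScope; the repository name is hypothetical.
	resourceType, remoteName, actions := "repository", "library/busybox", "pull"
	cacheKey := fmt.Sprintf("%s:%s:%s", resourceType, remoteName, actions)
	fmt.Println(cacheKey) // repository:library/busybox:pull (also the exact token scope string)
}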
+ cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions) + if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 { + return fmt.Errorf( + "Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d", + cacheKey, + colonCount, + ) + } scopes = append(scopes, *extraScope) } var token bearerToken @@ -648,9 +722,10 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall if service, ok := challenge.Parameters["service"]; ok && service != "" { params.Add("service", service) } + for _, scope := range scopes { - if scope.remoteName != "" && scope.actions != "" { - params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) + if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" { + params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions)) } } params.Add("grant_type", "refresh_token") @@ -700,8 +775,8 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, } for _, scope := range scopes { - if scope.remoteName != "" && scope.actions != "" { - params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) + if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" { + params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions)) } } @@ -742,19 +817,19 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { c.client = &http.Client{Transport: tr} ping := func(scheme string) error { - url, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)) + pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)) if err != nil { return err } - resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) + resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil) if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err) + logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err) return err } defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode) + logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return httpResponseToError(resp, "") + return registryHTTPResponseToError(resp) } c.challenges = parseAuthHeader(resp.Header) c.scheme = scheme @@ -772,17 +847,17 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { } // best effort to understand if we're talking to a V1 registry pingV1 := func(scheme string) bool { - url, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)) + pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)) if err != nil { return false } - resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) + resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil) if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err) + logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err) return false } defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode) + logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode) if resp.StatusCode != http.StatusOK && 
resp.StatusCode != http.StatusUnauthorized { return false } @@ -840,14 +915,14 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") } for _, u := range urls { - url, err := url.Parse(u) - if err != nil || (url.Scheme != "http" && url.Scheme != "https") { + blobURL, err := url.Parse(u) + if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") { continue // unsupported URL; skip it. } // NOTE: we must not authenticate on additional URLs as those // can be abused to leak credentials or tokens. Please // refer to CVE-2020-15157 for more information. - resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) + resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil) if err == nil { if resp.StatusCode != http.StatusOK { err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) @@ -894,12 +969,20 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty if err != nil { return nil, 0, err } - if err := httpResponseToError(res, "Error fetching blob"); err != nil { + if res.StatusCode != http.StatusOK { + err := registryHTTPResponseToError(res) res.Body.Close() - return nil, 0, err + return nil, 0, fmt.Errorf("fetching blob: %w", err) } cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref)) - return res.Body, getBlobSize(res), nil + blobSize := getBlobSize(res) + + reconnectingReader, err := newBodyReader(ctx, c, path, res.Body) + if err != nil { + res.Body.Close() + return nil, 0, err + } + return reconnectingReader, blobSize, nil } // getOCIDescriptorContents returns the contents of a blob specified by descriptor in ref, which must fit within limit. @@ -920,16 +1003,22 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR // isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error.
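The rewrite of isManifestUnknownError that follows replaces manual unwrapping of errcode.Errors with errors.As, which matches a target type anywhere in a %w-wrapped chain. A self-contained illustration of that pattern, using an invented error type in place of errcode.Error:

package main

import (
	"errors"
	"fmt"
)

// demoError stands in for errcode.Error; the type and its code value are hypothetical.
type demoError struct{ code string }

func (e demoError) Error() string { return e.code }

func main() {
	// Wrapping with %w keeps the concrete error reachable for errors.As.
	err := fmt.Errorf("reading manifest: %w", demoError{code: "MANIFEST_UNKNOWN"})

	var de demoError
	if errors.As(err, &de) && de.code == "MANIFEST_UNKNOWN" {
		fmt.Println("manifest unknown") // printed: errors.As walked the wrapped chain
	}
}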
func isManifestUnknownError(err error) bool { - var errs errcode.Errors - if !errors.As(err, &errs) || len(errs) == 0 { - return false - } - err = errs[0] - ec, ok := err.(errcode.ErrorCoder) - if !ok { - return false - } - return ec.ErrorCode() == v2.ErrorCodeManifestUnknown + // docker/distribution, and as defined in the spec + var ec errcode.ErrorCoder + if errors.As(err, &ec) && ec.ErrorCode() == v2.ErrorCodeManifestUnknown { + return true + } + // registry.redhat.io as of October 2022 + var e errcode.Error + if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" { + return true + } + // ALSO registry.redhat.io as of October 2022 + var unexpected *unexpectedHTTPResponseError + if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound && bytes.Contains(unexpected.Response, []byte("Not found")) { + return true + } + return false } // getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for @@ -975,9 +1064,8 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe return nil, err } defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), clientLib.HandleErrorResponse(res)) + return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), registryHTTPResponseToError(res)) } body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index 3e8dbbee134..6a4331e3356 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -77,8 +77,8 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. return nil, err } defer res.Body.Close() - if err := httpResponseToError(res, "fetching tags list"); err != nil { - return nil, err + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("fetching tags list: %w", registryHTTPResponseToError(res)) } var tagsHolder struct { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 6cd693b6bb7..7a7f72d9a24 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -244,7 +244,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference. logrus.Debugf("... not present") return false, -1, nil default: - return false, -1, fmt.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode)) + return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) } } @@ -358,8 +358,9 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, // Checking candidateRepo, and mounting from it, requires an // expanded token scope. extraScope := &authScope{ - remoteName: reference.Path(candidateRepo), - actions: "pull", + resourceType: "repository", + remoteName: reference.Path(candidateRepo), + actions: "pull", } // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead. 
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel. @@ -406,7 +407,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { - refTail := "" + var refTail string if instanceDigest != nil { // If the instanceDigest is provided, then use it as the refTail, because the reference, // whether it includes a tag or a digest, refers to the list as a whole, and not this @@ -486,15 +487,10 @@ func successStatus(status int) bool { return status >= 200 && status <= 399 } -// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error. +// isManifestInvalidError returns true iff err from registryHTTPResponseToError is a “manifest invalid” error. func isManifestInvalidError(err error) bool { - errors, ok := err.(errcode.Errors) - if !ok || len(errors) == 0 { - return false - } - err = errors[0] - ec, ok := err.(errcode.ErrorCoder) - if !ok { + var ec errcode.ErrorCoder + if ok := errors.As(err, &ec); !ok { return false } @@ -584,8 +580,8 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature // NOTE: Keep this in sync with docs/signature-protocols.md! for i, signature := range signatures { - url := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) - err := d.putOneSignature(url, signature) + sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) + err := d.putOneSignature(sigURL, signature) if err != nil { return err } @@ -596,8 +592,8 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature // is enough for dockerImageSource to stop looking for other signatures, so that // is sufficient. for i := len(signatures); ; i++ { - url := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) - missing, err := d.c.deleteOneSignature(url) + sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) + missing, err := d.c.deleteOneSignature(sigURL) if err != nil { return err } @@ -609,13 +605,13 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature return nil } -// putOneSignature stores sig to url. +// putOneSignature stores sig to sigURL. // NOTE: Keep this in sync with docs/signature-protocols.md! -func (d *dockerImageDestination) putOneSignature(url *url.URL, sig signature.Signature) error { - switch url.Scheme { +func (d *dockerImageDestination) putOneSignature(sigURL *url.URL, sig signature.Signature) error { + switch sigURL.Scheme { case "file": - logrus.Debugf("Writing to %s", url.Path) - err := os.MkdirAll(filepath.Dir(url.Path), 0755) + logrus.Debugf("Writing to %s", sigURL.Path) + err := os.MkdirAll(filepath.Dir(sigURL.Path), 0755) if err != nil { return err } @@ -623,16 +619,16 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, sig signature.Sig if err != nil { return err } - err = os.WriteFile(url.Path, blob, 0644) + err = os.WriteFile(sigURL.Path, blob, 0644) if err != nil { return err } return nil case "http", "https": - return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. 
Configure a lookaside-staging: location", url.Scheme, url.Redacted()) + return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted()) default: - return fmt.Errorf("Unsupported scheme when writing signature to %s", url.Redacted()) + return fmt.Errorf("Unsupported scheme when writing signature to %s", sigURL.Redacted()) } } @@ -643,7 +639,7 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context. ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest) if err != nil { - return nil + return err } var ociConfig imgspecv1.Image // Most fields empty by default if ociManifest == nil { @@ -652,6 +648,7 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context. Digest: "", // We will fill this in later. Size: 0, }, nil) + ociConfig.RootFS.Type = "layers" } else { logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String()) // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs. @@ -714,13 +711,13 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context. LayerIndex: nil, }) if err != nil { - return nil + return err } ociManifest.Config = configDesc manifestBlob, err := ociManifest.Serialize() if err != nil { - return nil + return err } logrus.Debugf("Uploading sigstore attachment manifest") return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest)) @@ -770,23 +767,23 @@ func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents }, nil } -// deleteOneSignature deletes a signature from url, if it exists. +// deleteOneSignature deletes a signature from sigURL, if it exists. // If it successfully determines that the signature does not exist, returns (true, nil) // NOTE: Keep this in sync with docs/signature-protocols.md! -func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) { - switch url.Scheme { +func (c *dockerClient) deleteOneSignature(sigURL *url.URL) (missing bool, err error) { + switch sigURL.Scheme { case "file": - logrus.Debugf("Deleting %s", url.Path) - err := os.Remove(url.Path) + logrus.Debugf("Deleting %s", sigURL.Path) + err := os.Remove(sigURL.Path) if err != nil && os.IsNotExist(err) { return true, nil } return false, err case "http", "https": - return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", url.Scheme, url.Redacted()) + return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. 
Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted()) default: - return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted()) + return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", sigURL.Redacted()) } } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index b0e87797102..373fb259c1e 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -10,7 +10,6 @@ import ( "net/http" "net/url" "os" - "regexp" "strings" "sync" @@ -24,10 +23,15 @@ import ( "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/regexp" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) +// maxLookasideSignatures is an arbitrary limit for the total number of signatures we would try to read from a lookaside server, +// even if it were broken or malicious and it continued serving an enormous number of items. +const maxLookasideSignatures = 128 + type dockerImageSource struct { impl.Compat impl.PropertyMethodsInitialize @@ -299,7 +303,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read } } -var multipartByteRangesRe = regexp.MustCompile("multipart/byteranges; boundary=([A-Za-z-0-9:]+)") +var multipartByteRangesRe = regexp.Delayed("multipart/byteranges; boundary=([A-Za-z-0-9:]+)") func parseMediaType(contentType string) (string, map[string]string, error) { mediaType, params, err := mime.ParseMediaType(contentType) @@ -372,12 +376,9 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, res.Body.Close() return nil, nil, private.BadPartialRequestError{Status: res.Status} default: - err := httpResponseToError(res, "Error fetching partial blob") - if err == nil { - err = fmt.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) - } + err := registryHTTPResponseToError(res) res.Body.Close() - return nil, nil, err + return nil, nil, fmt.Errorf("fetching partial blob: %w", err) } } @@ -451,8 +452,12 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst // NOTE: Keep this in sync with docs/signature-protocols.md! signatures := []signature.Signature{} for i := 0; ; i++ { - url := lookasideStorageURL(s.c.signatureBase, manifestDigest, i) - signature, missing, err := s.getOneSignature(ctx, url) + if i >= maxLookasideSignatures { + return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) + } + + sigURL := lookasideStorageURL(s.c.signatureBase, manifestDigest, i) + signature, missing, err := s.getOneSignature(ctx, sigURL) if err != nil { return nil, err } @@ -464,14 +469,14 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst return signatures, nil } -// getOneSignature downloads one signature from url, and returns (signature, false, nil) +// getOneSignature downloads one signature from sigURL, and returns (signature, false, nil) // If it successfully determines that the signature does not exist, returns (nil, true, nil). // NOTE: Keep this in sync with docs/signature-protocols.md! 
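The enumeration protocol used by getSignaturesFromLookaside above is simple: probe signature-1, signature-2, and so on until one is missing, but never past maxLookasideSignatures. A compact sketch of that control flow, with a stub standing in for the getOneSignature helper defined next:

package main

import "fmt"

const maxLookasideSignatures = 128 // mirrors the constant added above

// fetch is a stand-in for getOneSignature: it returns (signature, missing, error).
func fetch(i int) (string, bool, error) {
	if i >= 2 { // pretend the server stores exactly two signatures
		return "", true, nil
	}
	return fmt.Sprintf("signature-%d", i+1), false, nil
}

func main() {
	var signatures []string
	for i := 0; ; i++ {
		if i >= maxLookasideSignatures {
			fmt.Println("giving up: server keeps returning signatures")
			return
		}
		sig, missing, err := fetch(i)
		if err != nil {
			return
		}
		if missing {
			break // a 404 terminates the list
		}
		signatures = append(signatures, sig)
	}
	fmt.Println(signatures) // [signature-1 signature-2]
}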
-func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature.Signature, bool, error) { - switch url.Scheme { +func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL) (signature.Signature, bool, error) { + switch sigURL.Scheme { case "file": - logrus.Debugf("Reading %s", url.Path) - sigBlob, err := os.ReadFile(url.Path) + logrus.Debugf("Reading %s", sigURL.Path) + sigBlob, err := os.ReadFile(sigURL.Path) if err != nil { if os.IsNotExist(err) { return nil, true, nil @@ -480,13 +485,13 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( } sig, err := signature.FromBlob(sigBlob) if err != nil { - return nil, false, fmt.Errorf("parsing signature %q: %w", url.Path, err) + return nil, false, fmt.Errorf("parsing signature %q: %w", sigURL.Path, err) } return sig, false, nil case "http", "https": - logrus.Debugf("GET %s", url.Redacted()) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil) + logrus.Debugf("GET %s", sigURL.Redacted()) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, sigURL.String(), nil) if err != nil { return nil, false, err } @@ -496,22 +501,31 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( } defer res.Body.Close() if res.StatusCode == http.StatusNotFound { + logrus.Debugf("... got status 404, as expected = end of signatures") return nil, true, nil } else if res.StatusCode != http.StatusOK { - return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) + return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", sigURL.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) } + + contentType := res.Header.Get("Content-Type") + if mimeType := simplifyContentType(contentType); mimeType == "text/html" { + logrus.Warnf("Signature %q has Content-Type %q, unexpected for a signature", sigURL.Redacted(), contentType) + // Don’t immediately fail; the lookaside spec does not place any requirements on Content-Type. + // If the content really is HTML, it’s going to fail in signature.FromBlob. + } + sigBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) if err != nil { return nil, false, err } sig, err := signature.FromBlob(sigBlob) if err != nil { - return nil, false, fmt.Errorf("parsing signature %s: %w", url.Redacted(), err) + return nil, false, fmt.Errorf("parsing signature %s: %w", sigURL.Redacted(), err) } return sig, false, nil default: - return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", url.Redacted()) + return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", sigURL.Redacted()) } } @@ -605,16 +619,16 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere return err } defer get.Body.Close() - manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize) - if err != nil { - return err - } switch get.StatusCode { case http.StatusOK: case http.StatusNotFound: return fmt.Errorf("Unable to delete %v. 
Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) default: - return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) + return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(get)) + } + manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize) + if err != nil { + return err } manifestDigest, err := manifest.Digest(manifestBody) @@ -630,18 +644,13 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere return err } defer delete.Body.Close() - - body, err := iolimits.ReadAtMost(delete.Body, iolimits.MaxErrorBodySize) - if err != nil { - return err - } if delete.StatusCode != http.StatusAccepted { - return fmt.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) + return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(delete)) } for i := 0; ; i++ { - url := lookasideStorageURL(c.signatureBase, manifestDigest, i) - missing, err := c.deleteOneSignature(url) + sigURL := lookasideStorageURL(c.signatureBase, manifestDigest, i) + missing, err := c.deleteOneSignature(sigURL) if err != nil { return err } diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index 79590c4c746..74fe17648cc 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -5,7 +5,8 @@ import ( "fmt" "net/http" - "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/api/errcode" + "github.com/sirupsen/logrus" ) var ( @@ -35,7 +36,7 @@ func httpResponseToError(res *http.Response, context string) error { case http.StatusTooManyRequests: return ErrTooManyRequests case http.StatusUnauthorized: - err := client.HandleErrorResponse(res) + err := registryHTTPResponseToError(res) return ErrUnauthorizedForCredentials{Err: err} default: if context != "" { @@ -48,13 +49,48 @@ func httpResponseToError(res *http.Response, context string) error { // registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution // registry func registryHTTPResponseToError(res *http.Response) error { - err := client.HandleErrorResponse(res) - if e, ok := err.(*client.UnexpectedHTTPResponseError); ok { + err := handleErrorResponse(res) + // len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is. + if errs, ok := err.(errcode.Errors); ok && len(errs) > 0 { + // The docker/distribution registry implementation almost never returns + // more than one error in the HTTP body; it seems there is only one + // possible instance, where the second error reports a cleanup failure + // we don't really care about. + // + // The only _common_ case where a multi-element error is returned is + // created by the handleErrorResponse parser when OAuth authorization fails: + // the first element contains errors from a WWW-Authenticate header, the second + // element contains errors from the response body. + // + // In that case the first one is currently _slightly_ more informative (ErrorCodeUnauthorized + // for invalid tokens, ErrorCodeDenied for permission denied with a valid token + // for the first error, vs. ErrorCodeUnauthorized for both cases for the second error.) + // + // Also, docker/docker similarly only logs the other errors and returns the + // first one. 
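A self-contained sketch of the primary-error policy described in the comment above, with a local stand-in for errcode.Errors (which is a []error): secondary errors are logged and the first one is returned, matching the Debugf loop that follows:

package main

import (
	"errors"
	"log"
)

// multiErr stands in for errcode.Errors: a slice of errors that itself implements error.
type multiErr []error

func (m multiErr) Error() string { return m[0].Error() }

func primary(err error) error {
	if errs, ok := err.(multiErr); ok && len(errs) > 0 {
		for _, e := range errs[1:] {
			log.Printf("discarding non-primary error: %v", e)
		}
		return errs[0]
	}
	return err
}

func main() {
	err := primary(multiErr{
		errors.New("unauthorized: authentication required"), // from the WWW-Authenticate header
		errors.New("unauthorized: body error"),               // from the response body
	})
	log.Println(err) // unauthorized: authentication required
}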
+ if len(errs) > 1 { + logrus.Debugf("Discarding non-primary errors:") + for _, err := range errs[1:] { + logrus.Debugf(" %s", err.Error()) + } + } + err = errs[0] + } + switch e := err.(type) { + case *unexpectedHTTPResponseError: response := string(e.Response) if len(response) > 50 { response = response[:50] + "..." } - err = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response) + // %.0w makes e visible to errors.Unwrap() without including any text + err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e) + case errcode.Error: + // e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message), which is usually + // rather redundant. So reword it without using e.Code.Error() if e.Message is the default. + if e.Message == e.Code.Message() { + // %.0w makes e visible to errors.Unwrap() without including any text + err = fmt.Errorf("%s%.0w", e.Message, e) + } } return err } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go index eec7b84e526..3b986f503d9 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go @@ -34,15 +34,19 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) { } defer file.Close() - // If the file is already not compressed we can just return the file itself + // If the file is seekable and already not compressed we can just return the file itself // as a source. Otherwise we pass the stream to NewReaderFromStream. - stream, isCompressed, err := compression.AutoDecompress(file) - if err != nil { - return nil, fmt.Errorf("detecting compression for file %q: %w", path, err) - } - defer stream.Close() - if !isCompressed { - return newReader(path, false) + var stream io.Reader = file + if _, err := file.Seek(0, io.SeekCurrent); err == nil { // seeking is possible + decompressed, isCompressed, err := compression.AutoDecompress(file) + if err != nil { + return nil, fmt.Errorf("detecting compression for file %q: %w", path, err) + } + defer decompressed.Close() + stream = decompressed + if !isCompressed { + return newReader(path, false) + } } return NewReaderFromStream(sys, stream) } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go index f6ee041c496..44e1004a622 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -346,7 +346,7 @@ func (t *tarFI) Sys() interface{} { func (w *Writer) sendSymlinkLocked(path string, target string) error { hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target) if err != nil { - return nil + return err } logrus.Debugf("Sending as tar link %s -> %s", path, target) return w.tar.WriteHeader(hdr) @@ -363,7 +363,7 @@ func (w *Writer) sendBytesLocked(path string, b []byte) error { func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error { hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "") if err != nil { - return nil + return err } logrus.Debugf("Sending as tar file %s", path) if err := w.tar.WriteHeader(hdr); err != nil { diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go index 8c0c23b2fe1..b7cd00b0d68
100644 --- a/vendor/github.com/containers/image/v5/docker/reference/reference.go +++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go @@ -3,13 +3,13 @@ // // Grammar // -// reference := name [ ":" tag ] [ "@" digest ] +// reference := name [ ":" tag ] [ "@" digest ] // name := [domain '/'] path-component ['/' path-component]* // domain := domain-component ['.' domain-component]* [':' port-number] // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ +// alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go index 78603493203..bb8c5547438 100644 --- a/vendor/github.com/containers/image/v5/docker/reference/regexp.go +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go @@ -1,143 +1,155 @@ package reference -import "regexp" +import ( + storageRegexp "github.com/containers/storage/pkg/regexp" + "regexp" + "strings" +) -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a +const ( + // alphaNumeric defines the alpha numeric atom, typically a // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) + alphaNumeric = `[a-z0-9]+` - // separatorRegexp defines the separators allowed to be embedded in name + // separator defines the separators allowed to be embedded in name // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) + // dashes. Repeated dashes and underscores are intentionally treated + // differently. In order to support valid hostnames as name components, + // supporting repeated dash was added. Additionally double underscore is + // now allowed as a separator to loosen the restriction for previously + // supported names. + separator = `(?:[._]|__|[-]*)` - // domainComponentRegexp restricts the registry domain component of a // repository name to start with a component as defined by DomainRegexp // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + domainComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` + // The string counterpart for TagRegexp. + tag = `[\w][\w.-]{0,127}` + + // The string counterpart for DigestRegexp. + digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` + + // The string counterpart for IdentifierRegexp. + identifier = `([a-f0-9]{64})` + + // The string counterpart for ShortIdentifierRegexp. + shortIdentifier = `([a-f0-9]{6,64})` +) + +var ( + // nameComponent restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. 
+ nameComponent = expression( + alphaNumeric, + optional(repeated(separator, alphaNumeric))) + + domain = expression( + domainComponent, + optional(repeated(literal(`.`), domainComponent)), + optional(literal(`:`), `[0-9]+`)) // DomainRegexp defines the structure of potential domain components // that may be part of image names. This is purposely a subset of what is // allowed by DNS to ensure backwards compatibility with Docker image // names. - DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) + DomainRegexp = re(domain) // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) + TagRegexp = re(tag) + anchoredTag = anchored(tag) // anchoredTagRegexp matches valid tag names, anchored at the start and // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) + anchoredTagRegexp = storageRegexp.Delayed(anchoredTag) // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + DigestRegexp = re(digestPat) + anchoredDigest = anchored(digestPat) // anchoredDigestRegexp matches valid digests, anchored at the start and // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) + anchoredDigestRegexp = storageRegexp.Delayed(anchoredDigest) + namePat = expression( + optional(domain, literal(`/`)), + nameComponent, + optional(repeated(literal(`/`), nameComponent))) // NameRegexp is the format for the name component of references. The // regexp has capturing groups for the domain and name part omitting // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) + NameRegexp = re(namePat) + anchoredName = anchored( + optional(capture(domain), literal(`/`)), + capture(nameComponent, + optional(repeated(literal(`/`), nameComponent)))) // anchoredNameRegexp is used to parse a name value, capturing the // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) + anchoredNameRegexp = storageRegexp.Delayed(anchoredName) + referencePat = anchored(capture(namePat), + optional(literal(":"), capture(tag)), + optional(literal("@"), capture(digestPat))) // ReferenceRegexp is the full supported format of a reference. The regexp // is anchored and has capturing groups for name, tag, and digest // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) + ReferenceRegexp = re(referencePat) // IdentifierRegexp is the format for string identifier used as a // content addressable identifier using sha256. These identifiers // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) + IdentifierRegexp = re(identifier) // ShortIdentifierRegexp is the format used to represent a prefix // of an identifier. A prefix may be used to match a sha256 identifier // within a list of trusted identifiers. 
- ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + ShortIdentifierRegexp = re(shortIdentifier) + anchoredIdentifier = anchored(identifier) // anchoredIdentifierRegexp is used to check or match an // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) + anchoredIdentifierRegexp = storageRegexp.Delayed(anchoredIdentifier) ) -// match compiles the string to a regular expression. -var match = regexp.MustCompile +// re compiles the string to a regular expression. +var re = regexp.MustCompile // literal compiles s into a literal regular expression, escaping any regexp // reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re +func literal(s string) string { + return regexp.QuoteMeta(s) } // expression defines a full expression, where each regular expression must // follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) +func expression(res ...string) string { + return strings.Join(res, "") } // optional wraps the expression in a non-capturing group and makes the // production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) +func optional(res ...string) string { + return group(expression(res...)) + `?` } // repeated wraps the regexp in a non-capturing group to get one or more // matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) +func repeated(res ...string) string { + return group(expression(res...)) + `+` } // group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) +func group(res ...string) string { + return `(?:` + expression(res...) + `)` } // capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) +func capture(res ...string) string { + return `(` + expression(res...) + `)` } // anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) +func anchored(res ...string) string { + return `^` + expression(res...) 
+ `$` } diff --git a/vendor/github.com/containers/image/v5/docker/registries_d.go b/vendor/github.com/containers/image/v5/docker/registries_d.go index 37087dd857d..61a4964b6b1 100644 --- a/vendor/github.com/containers/image/v5/docker/registries_d.go +++ b/vendor/github.com/containers/image/v5/docker/registries_d.go @@ -163,17 +163,17 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { // the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, write bool) (*url.URL, error) { topLevel := config.signatureTopLevel(dr, write) - var url *url.URL + var baseURL *url.URL if topLevel != "" { u, err := url.Parse(topLevel) if err != nil { return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err) } - url = u + baseURL = u } else { // returns default directory if no lookaside specified in configuration file - url = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID()) - logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted()) + baseURL = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID()) + logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), baseURL.Redacted()) } // NOTE: Keep this in sync with docs/signature-protocols.md! // FIXME? Restrict to explicitly supported schemes? @@ -181,8 +181,8 @@ func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String()) } - url.Path = url.Path + "/" + repo - return url, nil + baseURL.Path = baseURL.Path + "/" + repo + return baseURL, nil } // builtinDefaultLookasideStorageDir returns default signature storage URL as per euid @@ -201,8 +201,8 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ identity := ref.PolicyConfigurationIdentity() if ns, ok := config.Docker[identity]; ok { logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, identity) - if url := ns.signatureTopLevel(write); url != "" { - return url + if ret := ns.signatureTopLevel(write); ret != "" { + return ret } } @@ -210,8 +210,8 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ for _, name := range ref.PolicyConfigurationNamespaces() { if ns, ok := config.Docker[name]; ok { logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, name) - if url := ns.signatureTopLevel(write); url != "" { - return url + if ret := ns.signatureTopLevel(write); ret != "" { + return ret } } } @@ -219,8 +219,8 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ // Look for a default location if config.DefaultDocker != nil { logrus.Debugf(` Lookaside configuration: using "default-docker" configuration`) - if url := config.DefaultDocker.signatureTopLevel(write); url != "" { - return url + if ret := config.DefaultDocker.signatureTopLevel(write); ret != "" { + return ret } } return "" @@ -287,7 +287,7 @@ func (ns registryNamespace) signatureTopLevel(write bool) string { // base is not nil from the caller // NOTE: Keep this in sync with docs/signature-protocols.md! 
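For concreteness, the URL produced by the lookasideStorageURL helper below follows the layout from docs/signature-protocols.md: base path, then "@<algorithm>=<hex>", then "signature-N" with a 1-based index. A standalone sketch (the base URL and digest are made-up examples; note the copy of *base so the caller's URL is not mutated, as in the function itself):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	base, _ := url.Parse("https://lookaside.example.com/sigstore/library/busybox")
	// Hypothetical sha256 digest, shortened for readability; real hex is 64 chars.
	algorithm, hex, index := "sha256", "1234abcd", 0
	sigURL := *base // shallow copy, so base itself stays untouched
	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, algorithm, hex, index+1)
	fmt.Println(sigURL.String())
	// https://lookaside.example.com/sigstore/library/busybox@sha256=1234abcd/signature-1
}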
func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL { - url := *base - url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) - return &url + sigURL := *base + sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) + return &sigURL } diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go index d0bbbba8a54..37ca098a810 100644 --- a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go +++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go @@ -3,6 +3,7 @@ package docker // Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies. import ( + "fmt" "net/http" "strings" ) @@ -70,6 +71,18 @@ func parseAuthHeader(header http.Header) []challenge { return challenges } +// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions` +func parseAuthScope(scopeStr string) (*authScope, error) { + if parts := strings.Split(scopeStr, ":"); len(parts) == 3 { + return &authScope{ + resourceType: parts[0], + remoteName: parts[1], + actions: parts[2], + }, nil + } + return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr) +} + // NOTE: This is not a fully compliant parser per RFC 7235: // Most notably it does not support more than one challenge within a single header // Some of the whitespace parsing also seems noncompliant. diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go index ee34ffdbd9c..cff68ac16e2 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go @@ -22,13 +22,14 @@ type Compat struct { // for implementations of private.ImageDestination. // // Use it like this: -// type yourDestination struct { -// impl.Compat -// … -// } -// dest := &yourDestination{…} -// dest.Compat = impl.AddCompat(dest) // +// type yourDestination struct { +// impl.Compat +// … +// } +// +// dest := &yourDestination{…} +// dest.Compat = impl.AddCompat(dest) func AddCompat(dest private.ImageDestinationInternalOnly) Compat { return Compat{dest} } diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go index e81eec8964a..ab233406a40 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go @@ -3,23 +3,25 @@ // Compare with imagedestination/impl, which might require non-trivial implementation work. // // There are two kinds of stubs: -// - Pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination -// implementation: // -// type yourDestination struct { -// stubs.ImplementsPutBlobPartial -// … -// } -// - Stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker -// means that a constructor must be called: -// type yourDestination struct { -// stubs.NoPutBlobPartialInitialize -// … -// } +// First, there are pure stubs, like ImplementsPutBlobPartial. 
Those can just be included in an imageDestination +// implementation: // -// dest := &yourDestination{ -// … -// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), -// } +// type yourDestination struct { +// stubs.ImplementsPutBlobPartial +// … +// } // +// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker +// means that a constructor must be called: +// +// type yourDestination struct { +// stubs.NoPutBlobPartialInitialize +// … +// } +// +// dest := &yourDestination{ +// … +// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), +// } package stubs diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go index 6f793291628..7d859c31258 100644 --- a/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go +++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go @@ -19,13 +19,14 @@ type Compat struct { // for implementations of private.ImageSource. // // Use it like this: -// type yourSource struct { -// impl.Compat -// … -// } -// src := &yourSource{…} -// src.Compat = impl.AddCompat(src) // +// type yourSource struct { +// impl.Compat +// … +// } +// +// src := &yourSource{…} +// src.Compat = impl.AddCompat(src) func AddCompat(src private.ImageSourceInternalOnly) Compat { return Compat{src} } diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go index 134fd1b53c2..cb345395e26 100644 --- a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go +++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go @@ -3,23 +3,26 @@ // Compare with imagesource/impl, which might require non-trivial implementation work. // // There are two kinds of stubs: -// - Pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource -// implementation: // -// type yourSource struct { -// stubs.ImplementsGetBlobAt -// … -// } -// - Stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker -// means that a constructor must be called: -// type yourSource struct { -// stubs.NoGetBlobAtInitialize -// … -// } +// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource // -// dest := &yourSource{ -// … -// NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), -// } +// implementation: // +// type yourSource struct { +// stubs.ImplementsGetBlobAt +// … +// } +// +// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. 
The Initialize marker +// means that a constructor must be called: +// +// type yourSource struct { +// stubs.NoGetBlobAtInitialize +// … +// } +// +// dest := &yourSource{ +// … +// NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), +// } package stubs diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go index 17342c8b761..30aff4c1e8c 100644 --- a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go +++ b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go @@ -7,6 +7,12 @@ const ( SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json" // from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature" + // from sigstore/cosign/pkg/oci/static.BundleAnnotationKey + SigstoreSETAnnotationKey = "dev.sigstore.cosign/bundle" + // from sigstore/cosign/pkg/oci/static.CertificateAnnotationKey + SigstoreCertificateAnnotationKey = "dev.sigstore.cosign/certificate" + // from sigstore/cosign/pkg/oci/static.ChainAnnotationKey + SigstoreIntermediateCertificateChainAnnotationKey = "dev.sigstore.cosign/chain" ) // Sigstore is a github.com/cosign/cosign signature. diff --git a/vendor/github.com/containers/image/v5/internal/signer/signer.go b/vendor/github.com/containers/image/v5/internal/signer/signer.go new file mode 100644 index 00000000000..5720254d1ce --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/signer/signer.go @@ -0,0 +1,47 @@ +package signer + +import ( + "context" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/signature" +) + +// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// This type is visible to external callers, so it has no public fields or methods apart from Close(). +// +// The owner of a Signer must call Close() when done. +type Signer struct { + implementation SignerImplementation +} + +// NewSigner creates a public Signer from a SignerImplementation +func NewSigner(impl SignerImplementation) *Signer { + return &Signer{implementation: impl} +} + +func (s *Signer) Close() error { + return s.implementation.Close() +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +// Alternatively, should SignImageManifest be provided a logging writer of some kind? +func ProgressMessage(signer *Signer) string { + return signer.implementation.ProgressMessage() +} + +// SignImageManifest invokes a SignerImplementation. +// This is a function, not a method, so that it can only be called by code that is allowed to import this internal subpackage. +func SignImageManifest(ctx context.Context, signer *Signer, manifest []byte, dockerReference reference.Named) (signature.Signature, error) { + return signer.implementation.SignImageManifest(ctx, manifest, dockerReference) +} + +// SignerImplementation is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// This interface is distinct from Signer so that implementations can be created outside of this package. +type SignerImplementation interface { + // ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. + ProgressMessage() string + // SignImageManifest creates a new signature for manifest m as dockerReference. 
+ SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) + Close() error +} diff --git a/vendor/github.com/containers/image/v5/internal/useragent/useragent.go b/vendor/github.com/containers/image/v5/internal/useragent/useragent.go new file mode 100644 index 00000000000..7ac49693ed5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/useragent/useragent.go @@ -0,0 +1,6 @@ +package useragent + +import "github.com/containers/image/v5/version" + +// DefaultUserAgent is a value that should be used by User-Agent headers, unless the user specifically instructs us otherwise. +var DefaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)" diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 9cf7dd3a941..5f352acc2f1 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -228,3 +228,16 @@ func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, variants := findCompressionMIMETypeSet(variantTable, mimeType) return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm. } + +// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() +// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. +func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer { + layers := make([]types.ImageInspectLayer, len(infos)) + for i, info := range infos { + layers[i].MIMEType = info.MediaType + layers[i].Digest = info.Digest + layers[i].Size = info.Size + layers[i].Annotations = info.Annotations + } + return layers +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index e1f1fb9d983..8e260c03db3 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -4,12 +4,12 @@ import ( "encoding/json" "errors" "fmt" - "regexp" "strings" "time" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/regexp" "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" ) @@ -206,7 +206,7 @@ func (m *Schema1) fixManifestLayers() error { return nil } -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) +var validHex = regexp.Delayed(`^([a-f0-9]{64})$`) func validateV1ID(id string) error { if ok := validHex.MatchString(id); !ok { @@ -221,13 +221,17 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { return nil, err } + layerInfos := m.LayerInfos() i := &types.ImageInspectInfo{ Tag: m.Tag, Created: &s1.Created, DockerVersion: s1.DockerVersion, Architecture: s1.Architecture, + Variant: s1.Variant, Os: s1.OS, - Layers: layerInfosToStrings(m.LayerInfos()), + Layers: layerInfosToStrings(layerInfos), + LayersData: imgInspectLayersFromLayerInfos(layerInfos), + Author: s1.Author, } if s1.Config != nil { i.Labels = s1.Config.Labels diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go index 
e79d0851f27..d9eca043bee 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -271,6 +271,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t if err := json.Unmarshal(config, s2); err != nil { return nil, err } + layerInfos := m.LayerInfos() i := &types.ImageInspectInfo{ Tag: "", Created: &s2.Created, @@ -278,7 +279,9 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t Architecture: s2.Architecture, Variant: s2.Variant, Os: s2.OS, - Layers: layerInfosToStrings(m.LayerInfos()), + Layers: layerInfosToStrings(layerInfos), + LayersData: imgInspectLayersFromLayerInfos(layerInfos), + Author: s2.Author, } if s2.Config != nil { i.Labels = s2.Config.Labels diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index fc325009cec..2c52423d9f9 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -212,15 +212,19 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type if err := json.Unmarshal(config, d1); err != nil { return nil, err } + layerInfos := m.LayerInfos() i := &types.ImageInspectInfo{ Tag: "", Created: v1.Created, DockerVersion: d1.DockerVersion, Labels: v1.Config.Labels, Architecture: v1.Architecture, + Variant: v1.Variant, Os: v1.OS, - Layers: layerInfosToStrings(m.LayerInfos()), + Layers: layerInfosToStrings(layerInfos), + LayersData: imgInspectLayersFromLayerInfos(layerInfos), Env: v1.Config.Env, + Author: v1.Author, } return i, nil } diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go index e5ad2570ef7..6c9ee334027 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go @@ -17,6 +17,17 @@ import ( "github.com/sirupsen/logrus" ) +// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough, +// but nothing matches the “image” part of the provided reference. +type ImageNotFoundError struct { + ref ociArchiveReference + // We may make members public, or add methods, in the future. +} + +func (e ImageNotFoundError) Error() string { + return fmt.Sprintf("no descriptor found for reference %q", e.ref.image) +} + type ociArchiveImageSource struct { impl.Compat @@ -35,6 +46,10 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiv unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys) if err != nil { + var notFound ocilayout.ImageNotFoundError + if errors.As(err, ¬Found) { + err = ImageNotFoundError{ref: ref} + } if err := tempDirRef.deleteTempDir(); err != nil { return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err) } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go index b2d963b0192..408af20a421 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -21,6 +21,17 @@ import ( imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) +// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough, +// but nothing matches the “image” part of the provided reference. 
+type ImageNotFoundError struct { + ref ociReference + // We may make members public, or add methods, in the future. +} + +func (e ImageNotFoundError) Error() string { + return fmt.Sprintf("no descriptor found for reference %q", e.ref.image) +} + type ociImageSource struct { impl.Compat impl.PropertyMethodsInitialize @@ -96,7 +107,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest var err error if instanceDigest == nil { - dig = digest.Digest(s.descriptor.Digest) + dig = s.descriptor.Digest mimeType = s.descriptor.MediaType } else { dig = *instanceDigest diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index be22bed6d5f..5375d3324ef 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -179,35 +179,25 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { return imgspecv1.Descriptor{}, err } - var d *imgspecv1.Descriptor if ref.image == "" { // return manifest if only one image is in the oci directory - if len(index.Manifests) == 1 { - d = &index.Manifests[0] - } else { + if len(index.Manifests) != 1 { // ask user to choose image when more than one image in the oci directory return imgspecv1.Descriptor{}, ErrMoreThanOneImage } + return index.Manifests[0], nil } else { // if image specified, look through all manifests for a match for _, md := range index.Manifests { if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex { continue } - refName, ok := md.Annotations[imgspecv1.AnnotationRefName] - if !ok { - continue - } - if refName == ref.image { - d = &md - break + if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { + return md, nil } } } - if d == nil { - return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image) - } - return *d, nil + return imgspecv1.Descriptor{}, ImageNotFoundError{ref} } // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index 8df1bfc8b62..42e8970a07b 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -332,7 +332,7 @@ var ( errEmptyCluster = errors.New("cluster has no server defined") ) -//helper for checking certificate/key/CA +// helper for checking certificate/key/CA func validateFileIsReadable(name string) error { answer, err := os.Open(name) defer func() { @@ -545,8 +545,10 @@ type clientConfigLoadingRules struct { // Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load // Load starts by running the MigrationRules and then // takes the loading rules and returns a Config object based on following rules. -// if the ExplicitPath, return the unmerged explicit file -// Otherwise, return a merged config based on the Precedence slice +// +// - if the ExplicitPath, return the unmerged explicit file +// - Otherwise, return a merged config based on the Precedence slice +// // A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored. // Read errors or files with non-deserializable content produce errors. 
// The first file to set a particular map key wins and map key's value is never changed. diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index b2e4dfd9e8a..38f5d531d2e 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -67,14 +67,14 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { // doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { - url := *c.baseURL - url.Path = path + requestURL := *c.baseURL + requestURL.Path = path var requestBodyReader io.Reader if requestBody != nil { logrus.Debugf("Will send body: %s", requestBody) requestBodyReader = bytes.NewReader(requestBody) } - req, err := http.NewRequestWithContext(ctx, method, url.String(), requestBodyReader) + req, err := http.NewRequestWithContext(ctx, method, requestURL.String(), requestBodyReader) if err != nil { return nil, err } @@ -90,7 +90,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re req.Header.Set("Content-Type", "application/json") } - logrus.Debugf("%s %s", method, url.Redacted()) + logrus.Debugf("%s %s", method, requestURL.Redacted()) res, err := c.httpClient.Do(req) if err != nil { return nil, err diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go index f7971a48f5f..15990394459 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "regexp" "strings" "github.com/containers/image/v5/docker/policyconfiguration" @@ -12,6 +11,7 @@ import ( genericImage "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/regexp" ) func init() { @@ -35,7 +35,7 @@ func (t openshiftTransport) ParseReference(reference string) (types.ImageReferen // Note that imageNameRegexp is namespace/stream:tag, this // is HOSTNAME/namespace/stream:tag or parent prefixes. // Keep this in sync with imageNameRegexp! -var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") +var scopeRegexp = regexp.Delayed("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). 
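A pattern that recurs throughout this vendor bump is the replacement of package-level regexp.MustCompile variables with regexp.Delayed from containers/storage/pkg/regexp, so that patterns are compiled on first use rather than at process start-up. The sketch below illustrates only the underlying idea; the delayedRegexp type and its methods are hypothetical stand-ins, not the actual containers/storage implementation:

	package main

	import (
		"fmt"
		"regexp"
		"sync"
	)

	// delayedRegexp defers regexp compilation from package initialization
	// to the first use of the pattern (illustrative, not the real type).
	type delayedRegexp struct {
		pattern string
		once    sync.Once
		re      *regexp.Regexp
	}

	func delayed(pattern string) *delayedRegexp {
		return &delayedRegexp{pattern: pattern}
	}

	// MatchString compiles the pattern at most once, then delegates to it.
	func (d *delayedRegexp) MatchString(s string) bool {
		d.once.Do(func() { d.re = regexp.MustCompile(d.pattern) })
		return d.re.MatchString(s)
	}

	var scopeRegexp = delayed("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")

	func main() {
		// Compilation happens here, on first use, not during init().
		fmt.Println(scopeRegexp.MatchString("quay.io/ns/stream:tag"))
	}

The real package wraps more of the regexp API, but the lazy sync.Once-style compile is the essence of the change seen in these hunks.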
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go index 658d4e9035b..14a84414ab6 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go @@ -10,7 +10,6 @@ import ( "fmt" "os" "path/filepath" - "regexp" "strings" "github.com/containers/image/v5/directory/explicitfilepath" @@ -18,6 +17,7 @@ import ( "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/regexp" ) const defaultOSTreeRepo = "/ostree/repo" @@ -216,7 +216,7 @@ func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemCon return errors.New("Deleting images not implemented for ostree: images") } -var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`) +var ostreeRefRegexp = regexp.Delayed(`^[A-Za-z0-9.-]$`) func encodeOStreeRef(in string) string { var buffer bytes.Buffer diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index 9623546d805..c363cb535bc 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -32,11 +32,6 @@ type dockerConfigFile struct { CredHelpers map[string]string `json:"credHelpers,omitempty"` } -type authPath struct { - path string - legacyFormat bool -} - var ( defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json") xdgConfigHomePath = filepath.FromSlash("containers/auth.json") @@ -52,11 +47,24 @@ var ( ErrNotSupported = errors.New("not supported") ) +// authPath combines a path to a file with container registry access keys, +// along with expected properties of that path (currently just whether it's legacy format or not). +type authPath struct { + path string + legacyFormat bool +} + +// newAuthPathDefault constructs an authPath in non-legacy format. +func newAuthPathDefault(path string) authPath { + return authPath{path: path, legacyFormat: false} +} + // SetCredentials stores the username and password in a location // appropriate for sys and the users’ configuration. // A valid key is a repository, a namespace within a registry, or a registry hostname; // using forms other than just a registry may fail depending on configuration. -// Returns a human-redable description of the location that was updated. +// Returns a human-readable description of the location that was updated. // NOTE: The return value is only intended to be read by humans; its form is not an API, // it may change (or new forms can be added) any time. func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) { @@ -78,25 +86,28 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s switch helper { // Special-case the built-in helpers for auth files.
case sysregistriesv2.AuthenticationFileHelper: - desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) { if ch, exists := auths.CredHelpers[key]; exists { if isNamespaced { - return false, unsupportedNamespaceErr(ch) + return false, "", unsupportedNamespaceErr(ch) + } + desc, err := setAuthToCredHelper(ch, key, username, password) + if err != nil { + return false, "", err } - return false, setAuthToCredHelper(ch, key, username, password) + return false, desc, nil } creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) newCreds := dockerAuthConfig{Auth: creds} auths.AuthConfigs[key] = newCreds - return true, nil + return true, "", nil }) // External helpers. default: if isNamespaced { err = unsupportedNamespaceErr(helper) } else { - desc = fmt.Sprintf("credential helper: %s", helper) - err = setAuthToCredHelper(helper, key, username, password) + desc, err = setAuthToCredHelper(helper, key, username, password) } } if err != nil { @@ -146,8 +157,8 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: for _, path := range getAuthFilePaths(sys, homedir.Get()) { - // readJSONFile returns an empty map in case the path doesn't exist. - auths, err := readJSONFile(path.path, path.legacyFormat) + // parse returns an empty map in case the path doesn't exist. + auths, err := path.parse() if err != nil { return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err) } @@ -205,32 +216,32 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // by tests. func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath { paths := []authPath{} - pathToAuth, lf, err := getPathToAuth(sys) + pathToAuth, userSpecifiedPath, err := getPathToAuth(sys) if err == nil { - paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf}) + paths = append(paths, pathToAuth) } else { // Error means that the path set for XDG_RUNTIME_DIR does not exist // but we don't want to completely fail in the case that the user is pulling a public image // Logging the error as a warning instead and moving on to pulling the image logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err) } - xdgCfgHome := os.Getenv("XDG_CONFIG_HOME") - if xdgCfgHome == "" { - xdgCfgHome = filepath.Join(homeDir, ".config") - } - paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false}) - if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" { - paths = append(paths, - authPath{path: filepath.Join(dockerConfig, "config.json"), legacyFormat: false}, - ) - } else { + if !userSpecifiedPath { + xdgCfgHome := os.Getenv("XDG_CONFIG_HOME") + if xdgCfgHome == "" { + xdgCfgHome = filepath.Join(homeDir, ".config") + } + paths = append(paths, newAuthPathDefault(filepath.Join(xdgCfgHome, xdgConfigHomePath))) + if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" { + paths = append(paths, newAuthPathDefault(filepath.Join(dockerConfig, "config.json"))) + } else { + paths = append(paths, + newAuthPathDefault(filepath.Join(homeDir, dockerHomePath)), + ) + } paths = append(paths, - authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false}, + authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true}, ) } - paths = append(paths, - 
authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true}, - ) return paths } @@ -276,7 +287,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t // Anonymous function to query credentials from auth files. getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) { for _, path := range getAuthFilePaths(sys, homeDir) { - authConfig, err := findCredentialsInFile(key, registry, path.path, path.legacyFormat) + authConfig, err := findCredentialsInFile(key, registry, path) if err != nil { return types.DockerAuthConfig{}, "", err } @@ -403,7 +414,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { switch helper { // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: - _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) { if innerHelper, exists := auths.CredHelpers[key]; exists { removeFromCredHelper(innerHelper) } @@ -411,7 +422,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { isLoggedIn = true delete(auths.AuthConfigs, key) } - return true, multiErr + return true, "", multiErr }) if err != nil { multiErr = multierror.Append(multiErr, err) @@ -446,18 +457,18 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { switch helper { // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: - _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) { for registry, helper := range auths.CredHelpers { // Helpers in auth files are expected // to exist, so no special treatment // for them. if err := deleteAuthFromCredHelper(helper, registry); err != nil { - return false, err + return false, "", err } } auths.CredHelpers = make(map[string]string) auths.AuthConfigs = make(map[string]dockerAuthConfig) - return true, nil + return true, "", nil }) // External helpers. default: @@ -495,28 +506,28 @@ func listAuthsFromCredHelper(credHelper string) (map[string]string, error) { return helperclient.List(p) } -// getPathToAuth gets the path of the auth.json file used for reading and writing credentials -// returns the path, and a bool specifies whether the file is in legacy format -func getPathToAuth(sys *types.SystemContext) (string, bool, error) { +// getPathToAuth gets the path of the auth.json file used for reading and writing credentials, +// and a boolean indicating whether the return value came from an explicit user choice (i.e. not defaults) +func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) { return getPathToAuthWithOS(sys, runtime.GOOS) } // getPathToAuthWithOS is an internal implementation detail of getPathToAuth, // it exists only to allow testing it with an artificial runtime.GOOS. 
-func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) { +func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) { if sys != nil { if sys.AuthFilePath != "" { - return sys.AuthFilePath, false, nil + return newAuthPathDefault(sys.AuthFilePath), true, nil } if sys.LegacyFormatAuthFilePath != "" { - return sys.LegacyFormatAuthFilePath, true, nil + return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil } if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil + return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil } } if goOS == "windows" || goOS == "darwin" { - return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil + return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil } runtimeDir := os.Getenv("XDG_RUNTIME_DIR") @@ -528,20 +539,20 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory // or made a typo while setting the environment variable, // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. - return "", false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err) + return authPath{}, false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err) } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. - return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil + return newAuthPathDefault(filepath.Join(runtimeDir, xdgRuntimeDirPath)), false, nil } - return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), false, nil + return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil } -// readJSONFile unmarshals the authentications stored in the auth.json file and returns it +// parse unmarshals the authentications stored in the auth.json file and returns it // or returns an empty dockerConfigFile data structure if auth.json does not exist -// if the file exists and is empty, readJSONFile returns an error -func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { +// if the file exists and is empty, this function returns an error. 
+func (path authPath) parse() (dockerConfigFile, error) { var auths dockerConfigFile - raw, err := os.ReadFile(path) + raw, err := os.ReadFile(path.path) if err != nil { if os.IsNotExist(err) { auths.AuthConfigs = map[string]dockerAuthConfig{} @@ -550,15 +561,15 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { return dockerConfigFile{}, err } - if legacyFormat { + if path.legacyFormat { if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil { - return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path, err) + return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) } return auths, nil } if err = json.Unmarshal(raw, &auths); err != nil { - return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path, err) + return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) } if auths.AuthConfigs == nil { @@ -573,42 +584,48 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { // modifyJSON finds an auth.json file, calls editor on the contents, and // writes it back if editor returns true. -// Returns a human-redable description of the file, to be returned by SetCredentials. -func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) (string, error) { - path, legacyFormat, err := getPathToAuth(sys) +// Returns a human-readable description of the file, to be returned by SetCredentials. +// +// The editor may also return a human-readable description of the updated location; if it is "", +// the file itself is used. +func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, string, error)) (string, error) { + path, _, err := getPathToAuth(sys) if err != nil { return "", err } - if legacyFormat { - return "", fmt.Errorf("writes to %s using legacy format are not supported", path) + if path.legacyFormat { + return "", fmt.Errorf("writes to %s using legacy format are not supported", path.path) } - dir := filepath.Dir(path) + dir := filepath.Dir(path.path) if err = os.MkdirAll(dir, 0700); err != nil { return "", err } - auths, err := readJSONFile(path, false) + auths, err := path.parse() if err != nil { - return "", fmt.Errorf("reading JSON file %q: %w", path, err) + return "", fmt.Errorf("reading JSON file %q: %w", path.path, err) } - updated, err := editor(&auths) + updated, description, err := editor(&auths) if err != nil { - return "", fmt.Errorf("updating %q: %w", path, err) + return "", fmt.Errorf("updating %q: %w", path.path, err) } if updated { newData, err := json.MarshalIndent(auths, "", "\t") if err != nil { - return "", fmt.Errorf("marshaling JSON %q: %w", path, err) + return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err) } - if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil { - return "", fmt.Errorf("writing to file %q: %w", path, err) + if err = ioutils.AtomicWriteFile(path.path, newData, 0600); err != nil { + return "", fmt.Errorf("writing to file %q: %w", path.path, err) } } - return path, nil + if description == "" { + description = path.path + } + return description, nil } func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { @@ -636,7 +653,9 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, } } -func setAuthToCredHelper(credHelper, registry, username, password string) error { +// setAuthToCredHelper stores (username, password) for registry in credHelper. 
+// Returns a human-readable description of the destination, to be returned by SetCredentials. +func setAuthToCredHelper(credHelper, registry, username, password string) (string, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) creds := &credentials.Credentials{ @@ -644,7 +663,10 @@ func setAuthToCredHelper(credHelper, registry, username, password string) error Username: username, Secret: password, } - return helperclient.Store(p, creds) + if err := helperclient.Store(p, creds); err != nil { + return "", err + } + return fmt.Sprintf("credential helper: %s", credHelper), nil } func deleteAuthFromCredHelper(credHelper, registry string) error { @@ -655,17 +677,17 @@ func deleteAuthFromCredHelper(credHelper, registry string) error { // findCredentialsInFile looks for credentials matching "key" // (which is "registry" or a namespace in "registry") in "path". -func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { - auths, err := readJSONFile(path, legacyFormat) +func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) { + auths, err := path.parse() if err != nil { - return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path, err) + return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err) } // First try cred helpers. They should always be normalized. // This intentionally uses "registry", not "key"; we don't support namespaced // credentials in helpers. if ch, exists := auths.CredHelpers[registry]; exists { - logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path) + logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path) return getAuthFromCredHelper(ch, registry) } @@ -673,7 +695,7 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types // (This is not a feature of ~/.docker/config.json; we support it even for // those files as an extension.) var keys []string - if !legacyFormat { + if !path.legacyFormat { keys = authKeysForKey(key) } else { keys = []string{registry} @@ -683,7 +705,7 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types // keys we prefer exact matches as well. for _, key := range keys { if val, exists := auths.AuthConfigs[key]; exists { - return decodeDockerAuth(val) + return decodeDockerAuth(path.path, key, val) } } @@ -697,14 +719,14 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types // so account for that as well. registry = normalizeRegistry(registry) for k, v := range auths.AuthConfigs { - if normalizeAuthFileKey(k, legacyFormat) == registry { - return decodeDockerAuth(v) + if normalizeAuthFileKey(k, path.legacyFormat) == registry { + return decodeDockerAuth(path.path, k, v) } } // Only log this if we found nothing; getCredentialsWithHomeDir logs the // source of found data. - logrus.Debugf("No credentials matching %s found in %s", key, path) + logrus.Debugf("No credentials matching %s found in %s", key, path.path) return types.DockerAuthConfig{}, nil } @@ -729,9 +751,9 @@ func authKeysForKey(key string) (res []string) { return res } -// decodeDockerAuth decodes the username and password, which is -// encoded in base64. +// decodeDockerAuth decodes the username and password from conf, +// which is the entry for key in path.
+func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) { decoded, err := base64.StdEncoding.DecodeString(conf.Auth) if err != nil { return types.DockerAuthConfig{}, err @@ -740,6 +762,11 @@ func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) { parts := strings.SplitN(string(decoded), ":", 2) if len(parts) != 2 { // if it's invalid just skip, as docker does + if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those + logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing colon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log. + } else { + logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path) + } return types.DockerAuthConfig{}, nil } diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index 3e16d8ca2b8..eeb7c1effdb 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -20,9 +20,9 @@ import ( // short names. // // Examples: -// * short names: "image:tag", "library/fedora" -// * not short names: "quay.io/image", "localhost/image:tag", -// "server.org:5000/lib/image", "image@sha256:..." +// - short names: "image:tag", "library/fedora" +// - not short names: "quay.io/image", "localhost/image:tag", +// "server.org:5000/lib/image", "image@sha256:..." func IsShortName(input string) bool { isShort, _, _ := parseUnnormalizedShortName(input) return isShort @@ -59,8 +59,6 @@ func parseUnnormalizedShortName(input string) (bool, reference.Named, error) { // the tag or digest and stores it in the return values so that both can be // re-added to a possible resolved alias' or USRs at a later point. func splitUserInput(named reference.Named) (isTagged bool, isDigested bool, normalized reference.Named, tag string, digest digest.Digest) { - normalized = named - tagged, isT := named.(reference.NamedTagged) if isT { isTagged = true @@ -170,7 +168,7 @@ func (r *Resolved) Description() string { // Note that nil is returned if len(pullErrors) == 0. Otherwise, the amount of // pull errors must equal the amount of pull candidates. func (r *Resolved) FormatPullErrors(pullErrors []error) error { - if len(pullErrors) >= 0 && len(pullErrors) != len(r.PullCandidates) { + if len(pullErrors) > 0 && len(pullErrors) != len(r.PullCandidates) { pullErrors = append(pullErrors, fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates", len(r.PullCandidates), len(pullErrors), len(r.PullCandidates))) @@ -402,9 +400,9 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { // not a short name), it is returned as is. In case, it's a short name, the // returned slice of named references looks as follows: // -// 1) If present, the short-name alias -// 2) "localhost/" as used by many container engines such as Podman and Buildah -// 3) Unqualified-search registries from the registries.conf files +// 1. If present, the short-name alias +// 2. "localhost/" as used by many container engines such as Podman and Buildah +// 3. Unqualified-search registries from the registries.conf files // // Note that tags and digests are stripped from the specified name before // looking up an alias.
Stripped off tags and digests are later on appended to diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 12939b24da6..7ebd3fd2202 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -335,7 +335,7 @@ func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAli return &conf, cache, nil } -func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, lockfile.Locker, error) { +func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, *lockfile.LockFile, error) { shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx) if err != nil { return "", nil, err } @@ -346,6 +346,6 @@ func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, lockfile } lockPath := shortNameAliasesConfPath + ".lock" - locker, err := lockfile.GetLockfile(lockPath) + locker, err := lockfile.GetLockFile(lockPath) return shortNameAliasesConfPath, locker, err } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 41204dd9afc..463e770280e 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -6,7 +6,6 @@ import ( "os" "path/filepath" "reflect" - "regexp" "sort" "strings" "sync" @@ -15,6 +14,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/regexp" "github.com/sirupsen/logrus" ) @@ -198,6 +198,7 @@ type V1RegistriesConf struct { } // Nonempty returns true if config contains at least one configuration entry. +// Empty arrays are treated as missing entries. func (config *V1RegistriesConf) Nonempty() bool { copy := *config // A shallow copy if copy.V1TOMLConfig.Search.Registries != nil && len(copy.V1TOMLConfig.Search.Registries) == 0 { @@ -209,7 +210,15 @@ func (config *V1RegistriesConf) Nonempty() bool { if copy.V1TOMLConfig.Block.Registries != nil && len(copy.V1TOMLConfig.Block.Registries) == 0 { copy.V1TOMLConfig.Block.Registries = nil } - return !reflect.DeepEqual(copy, V1RegistriesConf{}) + return copy.hasSetField() +} + +// hasSetField returns true if config contains at least one configuration entry. +// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field +// is not modified while unmarshaling (in our case remains nil), while an [] is unmarshaled +// as a non-nil []string{}. +func (config *V1RegistriesConf) hasSetField() bool { + return !reflect.DeepEqual(*config, V1RegistriesConf{}) } // V2RegistriesConf is the sysregistries v2 configuration format. @@ -257,7 +266,15 @@ func (config *V2RegistriesConf) Nonempty() bool { if !copy.shortNameAliasConf.nonempty() { copy.shortNameAliasConf = shortNameAliasConf{} } - return !reflect.DeepEqual(copy, V2RegistriesConf{}) + return copy.hasSetField() +} + +// hasSetField returns true if config contains at least one configuration entry.
+// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field +// is not modified while unmarshaling (in our case remains nil), while an [] is unmarshaled +// as a non-nil []string{}. +func (config *V2RegistriesConf) hasSetField() bool { + return !reflect.DeepEqual(*config, V2RegistriesConf{}) } // parsedConfig is the result of parsing, and possibly merging, configuration files; @@ -367,7 +384,7 @@ func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) { } // anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. -var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$") +var anchoredDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$") // postProcess checks the consistency of all the configuration, looks for conflicts, // and normalizes the configuration (e.g., sets the Prefix to Location if not set). @@ -923,15 +940,15 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) { logrus.Debugf("Failed to decode keys %q from %q", keys, path) } - if combinedTOML.V1RegistriesConf.Nonempty() { + if combinedTOML.V1RegistriesConf.hasSetField() { // Enforce the v2 format if requested. if forceV2 { return nil, &InvalidRegistries{s: "registry must be in v2 format but is in v1"} } // Convert a v1 config into a v2 config. - if combinedTOML.V2RegistriesConf.Nonempty() { - return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"} + if combinedTOML.V2RegistriesConf.hasSetField() { + return nil, &InvalidRegistries{s: fmt.Sprintf("mixing sysregistry v1/v2 is not supported: %#v", combinedTOML)} } converted, err := combinedTOML.V1RegistriesConf.ConvertToV2() if err != nil { diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index 9599aa3c9d0..285203bad4e 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -2,6 +2,7 @@ package tlsclientconfig import ( "crypto/tls" + "crypto/x509" "fmt" "net" "net/http" @@ -10,8 +11,6 @@ import ( "strings" "time" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" "github.com/sirupsen/logrus" ) @@ -47,7 +46,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { return err } if tlsc.RootCAs == nil { - systemPool, err := tlsconfig.SystemCertPool() + systemPool, err := x509.SystemCertPool() if err != nil { return fmt.Errorf("unable to get system cert pool: %w", err) } @@ -103,8 +102,5 @@ func NewTransport() *http.Transport { // TODO(dmcgowan): Call close idle connections when complete and use keep alive DisableKeepAlives: true, } - if _, err := sockets.DialerFromEnvironment(direct); err != nil { - logrus.Debugf("Can't execute DialerFromEnvironment: %v", err) - } return tr } diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go new file mode 100644 index 00000000000..2e7f44ce5f1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go @@ -0,0 +1,174 @@ +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/x509" + "encoding/asn1" + "errors" + "fmt" + "time" + + "github.com/containers/image/v5/signature/internal" +
"github.com/sigstore/fulcio/pkg/certificate" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +// fulcioTrustRoot contains a policy for validating Fulcio-issued certificates. +// Users should call validate() on the policy before using it. +type fulcioTrustRoot struct { + caCertificates *x509.CertPool + oidcIssuer string + subjectEmail string +} + +func (f *fulcioTrustRoot) validate() error { + if f.oidcIssuer == "" { + return errors.New("Internal inconsistency: Fulcio use set up without OIDC issuer") + } + if f.subjectEmail == "" { + return errors.New("Internal inconsistency: Fulcio use set up without subject email") + } + return nil +} + +func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) { + // == Verify the certificate is correctly signed + var untrustedIntermediatePool *x509.CertPool // = nil + // untrustedCertificateChainPool.AppendCertsFromPEM does something broadly similar, + // but it seems to optimize for memory usage at the cost of larger CPU usage (i.e. to load + // the hundreds of trusted CAs). Golang’s TLS code similarly calls individual AddCert + // for intermediate certificates. + if len(untrustedIntermediateChainBytes) > 0 { + untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err)) + } + untrustedIntermediatePool = x509.NewCertPool() + if len(untrustedIntermediateChain) > 1 { + for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] { + untrustedIntermediatePool.AddCert(untrustedIntermediateCert) + } + } + } + + untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err)) + } + switch len(untrustedLeafCerts) { + case 0: + return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data") + case 1: + break // OK + default: + return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data") + } + untrustedCertificate := untrustedLeafCerts[0] + + // Go rejects Subject Alternative Name that has no DNSNames, EmailAddresses, IPAddresses and URIs; + // we match SAN ourselves, so override that. + if len(untrustedCertificate.UnhandledCriticalExtensions) > 0 { + var remaining []asn1.ObjectIdentifier + for _, oid := range untrustedCertificate.UnhandledCriticalExtensions { + if !oid.Equal(cryptoutils.SANOID) { + remaining = append(remaining, oid) + } + } + untrustedCertificate.UnhandledCriticalExtensions = remaining + } + + if _, err := untrustedCertificate.Verify(x509.VerifyOptions{ + Intermediates: untrustedIntermediatePool, + Roots: f.caCertificates, + // NOTE: Cosign uses untrustedCertificate.NotBefore here (i.e. uses _that_ time for intermediate certificate validation), + // and validates the leaf certificate against relevantTime manually. + // We verify the full certificate chain against relevantTime instead. + // Assuming the certificate is fulcio-generated and very short-lived, that should make little difference.
+ CurrentTime: relevantTime, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning}, + }); err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("verifying leaf certificate failed: %v", err)) + } + + // Cosign verifies a SCT of the certificate (either embedded, or even, probably irrelevant, externally-supplied). + // + // We don’t currently do that. + // + // At the very least, with Fulcio we require Rekor SETs to prove Rekor contains a log of the signature, and that + // already contains the full certificate; so a SCT of the certificate is superfluous (assuming Rekor allowed searching by + // certificate subject, which, well…). That argument might go away if we add support for RFC 3161 timestamps instead of Rekor. + // + // Secondarily, assuming a trusted Fulcio server (which, to be fair, might not be the case for the public one) SCT is not clearly + // better than the Fulcio server maintaining an audit log; a SCT can only reveal a misissuance if there is some other authoritative + // log of approved Fulcio invocations, and it’s not clear where that would come from, especially since human users manually + // logging in using OpenID are not going to maintain a record of those actions. + // + // Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures + // by correctly-issued certificates. + // + // So, pragmatically, the ideal design seems to be to only do signatures from a trusted build system (which is, by definition, + // the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to + // make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary. + + // == Validate the recorded OIDC issuer + gotOIDCIssuer := false + var oidcIssuer string + // certificate.ParseExtensions doesn’t reject duplicate extensions. + // Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19, + // reject duplicates manually. With Go 1.19, we could call certificate.ParseExtensions again. + for _, untrustedExt := range untrustedCertificate.Extensions { + if untrustedExt.Id.Equal(certificate.OIDIssuer) { + if gotOIDCIssuer { + // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions + // already in ParseCertificate. + return nil, internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer extension") + } + oidcIssuer = string(untrustedExt.Value) + gotOIDCIssuer = true + } + } + if !gotOIDCIssuer { + return nil, internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension") + } + if oidcIssuer != f.oidcIssuer { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected Fulcio OIDC issuer %q", oidcIssuer)) + } + + // == Validate the OIDC subject + foundEmail := false + // TODO: Use slices.Contains after we update to Go 1.18 + for _, certEmail := range untrustedCertificate.EmailAddresses { + if certEmail == f.subjectEmail { + foundEmail = true + break + } + } + if !foundEmail { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)", + f.subjectEmail, + untrustedCertificate.EmailAddresses)) + } + // FIXME: Match more subject types?
Cosign does: + // - .DNSNames (can’t be issued by Fulcio) + // - .IPAddresses (can’t be issued by Fulcio) + // - .URIs (CAN be issued by Fulcio) + // - OtherName values in SAN (CAN be issued by Fulcio) + // - Various values about GitHub workflows (CAN be issued by Fulcio) + // What does it… mean to get an OAuth2 identity for an IP address? + // FIXME: How far into Turing-completeness for the issuer/subject do we need to get? Simultaneously accepted alternatives, for + // issuers and/or subjects and/or combinations? Regexps? More? + + return untrustedCertificate.PublicKey, nil +} + +func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, + untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, + untrustedPayloadBytes []byte) (crypto.PublicKey, error) { + rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKey, untrustedRekorSET, untrustedCertificateBytes, + untrustedBase64Signature, untrustedPayloadBytes) + if err != nil { + return nil, err + } + return fulcioTrustRoot.verifyFulcioCertificateAtTime(rekorSETTime, untrustedCertificateBytes, untrustedIntermediateChainBytes) +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go new file mode 100644 index 00000000000..27c2c7e6372 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -0,0 +1,237 @@ +package internal + +import ( + "bytes" + "crypto/ecdsa" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "fmt" + "time" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" + "github.com/sigstore/rekor/pkg/generated/models" +) + +// This is the github.com/sigstore/rekor/pkg/generated/models.Hashedrekord.APIVersion for github.com/sigstore/rekor/pkg/generated/models.HashedrekordV001Schema. +// We could alternatively use github.com/sigstore/rekor/pkg/types/hashedrekord.APIVERSION, but that subpackage adds too many dependencies. +const HashedRekordV001APIVersion = "0.0.1" + +// UntrustedRekorSET is a parsed content of the sigstore-signature Rekor SET +// (note that this a signature-specific format, not a format directly used by the Rekor API). +// This corresponds to github.com/sigstore/cosign/bundle.RekorBundle, but we impose a stricter decoder. +type UntrustedRekorSET struct { + UntrustedSignedEntryTimestamp []byte // A signature over some canonical JSON form of UntrustedPayload + UntrustedPayload json.RawMessage +} + +type UntrustedRekorPayload struct { + Body []byte // In cosign, this is an interface{}, but only a string works + IntegratedTime int64 + LogIndex int64 + LogID string +} + +// A compile-time check that UntrustedRekorSET implements json.Unmarshaler +var _ json.Unmarshaler = (*UntrustedRekorSET)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error { + err := s.strictUnmarshalJSON(data) + if err != nil { + if formatErr, ok := err.(JSONFormatError); ok { + err = NewInvalidSignatureError(formatErr.Error()) + } + } + return err +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. +// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. 
+func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error { + return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp, + "Payload": &s.UntrustedPayload, + }) +} + +// A compile-time check that UntrustedRekorSET and *UntrustedRekorSET implements json.Marshaler +var _ json.Marshaler = UntrustedRekorSET{} +var _ json.Marshaler = (*UntrustedRekorSET)(nil) + +// MarshalJSON implements the json.Marshaler interface. +func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp, + "Payload": s.UntrustedPayload, + }) +} + +// A compile-time check that UntrustedRekorPayload implements json.Unmarshaler +var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error { + err := p.strictUnmarshalJSON(data) + if err != nil { + if formatErr, ok := err.(JSONFormatError); ok { + err = NewInvalidSignatureError(formatErr.Error()) + } + } + return err +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. +// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. +func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error { + return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "body": &p.Body, + "integratedTime": &p.IntegratedTime, + "logIndex": &p.LogIndex, + "logID": &p.LogID, + }) +} + +// A compile-time check that UntrustedRekorPayload and *UntrustedRekorPayload implements json.Marshaler +var _ json.Marshaler = UntrustedRekorPayload{} +var _ json.Marshaler = (*UntrustedRekorPayload)(nil) + +// MarshalJSON implements the json.Marshaler interface. +func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "body": p.Body, + "integratedTime": p.IntegratedTime, + "logIndex": p.LogIndex, + "logID": p.LogID, + }) +} + +// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data. +// Returns bundle upload time on success. +func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { + // FIXME: Should the publicKey parameter hard-code ecdsa? + + // == Parse SET bytes + var untrustedSET UntrustedRekorSET + // Sadly, we need to parse and transform untrusted data before verifying a cryptographic signature... + if err := json.Unmarshal(unverifiedRekorSET, &untrustedSET); err != nil { + return time.Time{}, NewInvalidSignatureError(err.Error()) + } + // == Verify SET signature + // Cosign unmarshals and re-marshals UntrustedPayload; that seems unnecessary, + assuming jsoncanonicalizer is designed to operate on untrusted data.
+ untrustedSETPayloadCanonicalBytes, err := jsoncanonicalizer.Transform(untrustedSET.UntrustedPayload) + if err != nil { + return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err)) + } + untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes) + if !ecdsa.VerifyASN1(publicKey, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) { + return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed") + } + + // == Parse SET payload + // Parse the cryptographically-verified canonicalized variant, NOT the originally-delivered representation, + // to decrease risk of exploiting the JSON parser. Note that if there were an arbitrary execution vulnerability, the attacker + // could have exploited the parsing of unverifiedRekorSET above already; so this, at best, ensures more consistent processing + // of the SET payload. + var rekorPayload UntrustedRekorPayload + if err := json.Unmarshal(untrustedSETPayloadCanonicalBytes, &rekorPayload); err != nil { + return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("parsing Rekor SET payload: %v", err.Error())) + } + // FIXME: Use a different decoder implementation? The Swagger-generated code is kinda ridiculous, with the need to re-marshal + // hashedRekor.Spec and so on. + // Especially if we anticipate needing to decode different data formats… + // That would also allow being much more strict about JSON. + // + // Alternatively, rely on the existing .Validate() methods instead of manually checking for nil all over the place. + var hashedRekord models.Hashedrekord + if err := json.Unmarshal(rekorPayload.Body, &hashedRekord); err != nil { + return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding the body of a Rekor SET payload: %v", err)) + } + // The decode of models.HashedRekord validates the "kind": "hashedrekord" field, which is otherwise invisible to us. + if hashedRekord.APIVersion == nil { + return time.Time{}, NewInvalidSignatureError("missing Rekor SET Payload API version") + } + if *hashedRekord.APIVersion != HashedRekordV001APIVersion { + return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("unsupported Rekor SET Payload hashedrekord version %#v", hashedRekord.APIVersion)) + } + hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec) + if err != nil { + // Coverage: hashedRekord.Spec is an interface{} that was just unmarshaled, + so this should never fail.
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err))
+	}
+	var hashedRekordV001 models.HashedrekordV001Schema
+	if err := json.Unmarshal(hashedRekordV001Bytes, &hashedRekordV001); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding hashedrekord spec: %v", err))
+	}
+
+	// == Match unverifiedKeyOrCertBytes
+	if hashedRekordV001.Signature == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "signature" field in hashedrekord`)
+	}
+	if hashedRekordV001.Signature.PublicKey == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "signature.publicKey" field in hashedrekord`)
+
+	}
+	rekorKeyOrCertPEM, rest := pem.Decode(hashedRekordV001.Signature.PublicKey.Content)
+	if rekorKeyOrCertPEM == nil {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET is not in PEM format")
+	}
+	if len(rest) != 0 {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET has trailing data")
+	}
+	// FIXME: For public keys, let the caller provide the DER-formatted blob instead
+	// of round-tripping through PEM.
+	unverifiedKeyOrCertPEM, rest := pem.Decode(unverifiedKeyOrCertBytes)
+	if unverifiedKeyOrCertPEM == nil {
+		return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET is not in PEM format")
+	}
+	if len(rest) != 0 {
+		return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET has trailing data")
+	}
+	// NOTE: This compares the PEM payload, but not the object type or headers.
+	if !bytes.Equal(rekorKeyOrCertPEM.Bytes, unverifiedKeyOrCertPEM.Bytes) {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET does not match")
+	}
+	// == Match unverifiedSignatureBytes
+	unverifiedSignatureBytes, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding signature base64: %v", err))
+	}
+	if !bytes.Equal(hashedRekordV001.Signature.Content, unverifiedSignatureBytes) {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("signature in Rekor SET does not match: %#v vs. %#v",
+			string(hashedRekordV001.Signature.Content), string(unverifiedSignatureBytes)))
+	}
+
+	// == Match unverifiedPayloadBytes
+	if hashedRekordV001.Data == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data" field in hashedrekord`)
+	}
+	if hashedRekordV001.Data.Hash == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash" field in hashedrekord`)
+	}
+	if hashedRekordV001.Data.Hash.Algorithm == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
+	}
+	if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm))
+	}
+	if hashedRekordV001.Data.Hash.Value == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.value" field in hashedrekord`)
+	}
+	rekorPayloadHash, err := hex.DecodeString(*hashedRekordV001.Data.Hash.Value)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Invalid "data.hash.value" field in hashedrekord: %v`, err))
+
+	}
+	unverifiedPayloadHash := sha256.Sum256(unverifiedPayloadBytes)
+	if !bytes.Equal(rekorPayloadHash, unverifiedPayloadHash[:]) {
+		return time.Time{}, NewInvalidSignatureError("payload in Rekor SET does not match")
+	}
+
+	// == All OK; return the relevant time.
+	return time.Unix(rekorPayload.IntegratedTime, 0), nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
index bb5e9139d76..afacd820348 100644
--- a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
+++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
@@ -46,7 +46,8 @@ func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerRefer
 	}
 }
 
-// Compile-time check that UntrustedSigstorePayload implements json.Marshaler
+// A compile-time check that UntrustedSigstorePayload and *UntrustedSigstorePayload implement json.Marshaler
+var _ json.Marshaler = UntrustedSigstorePayload{}
 var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
 
 // MarshalJSON implements the json.Marshaler interface.
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
index f8fdce2da5f..5ca4ad71301 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_config.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -19,13 +19,13 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
-	"regexp"
 
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/signature/internal"
 	"github.com/containers/image/v5/transports"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/homedir"
+	"github.com/containers/storage/pkg/regexp"
 )
 
 // systemDefaultPolicyPath is the policy path used for DefaultPolicy().
@@ -518,107 +518,6 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-// newPRSigstoreSigned returns a new prSigstoreSigned if parameters are valid.
-func newPRSigstoreSigned(keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) { - if len(keyPath) > 0 && len(keyData) > 0 { - return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously") - } - if signedIdentity == nil { - return nil, InvalidPolicyFormatError("signedIdentity not specified") - } - return &prSigstoreSigned{ - prCommon: prCommon{Type: prTypeSigstoreSigned}, - KeyPath: keyPath, - KeyData: keyData, - SignedIdentity: signedIdentity, - }, nil -} - -// newPRSigstoreSignedKeyPath is NewPRSigstoreSignedKeyPath, except it returns the private type. -func newPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) { - return newPRSigstoreSigned(keyPath, nil, signedIdentity) -} - -// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath -func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { - return newPRSigstoreSignedKeyPath(keyPath, signedIdentity) -} - -// newPRSigstoreSignedKeyData is NewPRSigstoreSignedKeyData, except it returns the private type. -func newPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) { - return newPRSigstoreSigned("", keyData, signedIdentity) -} - -// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData -func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { - return newPRSigstoreSignedKeyData(keyData, signedIdentity) -} - -// Compile-time check that prSigstoreSigned implements json.Unmarshaler. -var _ json.Unmarshaler = (*prSigstoreSigned)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { - *pr = prSigstoreSigned{} - var tmp prSigstoreSigned - var gotKeyPath, gotKeyData = false, false - var signedIdentity json.RawMessage - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { - switch key { - case "type": - return &tmp.Type - case "keyPath": - gotKeyPath = true - return &tmp.KeyPath - case "keyData": - gotKeyData = true - return &tmp.KeyData - case "signedIdentity": - return &signedIdentity - default: - return nil - } - }); err != nil { - return err - } - - if tmp.Type != prTypeSigstoreSigned { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - if signedIdentity == nil { - tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() - } else { - si, err := newPolicyReferenceMatchFromJSON(signedIdentity) - if err != nil { - return err - } - tmp.SignedIdentity = si - } - - var res *prSigstoreSigned - var err error - switch { - case gotKeyPath && gotKeyData: - return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously") - case gotKeyPath && !gotKeyData: - res, err = newPRSigstoreSignedKeyPath(tmp.KeyPath, tmp.SignedIdentity) - case !gotKeyPath && gotKeyData: - res, err = newPRSigstoreSignedKeyData(tmp.KeyData, tmp.SignedIdentity) - case !gotKeyPath && !gotKeyData: - return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified") - default: // Coverage: This should never happen - return fmt.Errorf("Impossible keyPath/keyData presence combination!?") - } - if err != nil { - // Coverage: This cannot currently happen, creating a prSigstoreSigned only fails - // if signedIdentity is nil, which we replace with a default above. - return err - } - *pr = *res - - return nil -} - // newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation. func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { var typeField prmCommon @@ -829,12 +728,12 @@ func (prm *prmExactRepository) UnmarshalJSON(data []byte) error { // Private objects for validateIdentityRemappingPrefix var ( // remapIdentityDomainRegexp matches exactly a reference domain (name[:port]) - remapIdentityDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$") + remapIdentityDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$") // remapIdentityDomainPrefixRegexp matches a reference that starts with a domain; // we need this because reference.NameRegexp accepts short names with docker.io implied. 
-	remapIdentityDomainPrefixRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "/")
+	remapIdentityDomainPrefixRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "/")
 	// remapIdentityNameRegexp matches exactly a reference.Named name (possibly unnormalized)
-	remapIdentityNameRegexp = regexp.MustCompile("^" + reference.NameRegexp.String() + "$")
+	remapIdentityNameRegexp = regexp.Delayed("^" + reference.NameRegexp.String() + "$")
 )
 
 // validateIdentityRemappingPrefix returns an InvalidPolicyFormatError if s is detected to be invalid
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
new file mode 100644
index 00000000000..eeec6dc3ebc
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
@@ -0,0 +1,343 @@
+package signature
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/containers/image/v5/signature/internal"
+)
+
+// PRSigstoreSignedOption is a way to pass values to NewPRSigstoreSigned
+type PRSigstoreSignedOption func(*prSigstoreSigned) error
+
+// PRSigstoreSignedWithKeyPath specifies a value for the "keyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyPath != "" {
+			return errors.New(`"keyPath" already specified`)
+		}
+		pr.KeyPath = keyPath
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyData != nil {
+			return errors.New(`"keyData" already specified`)
+		}
+		pr.KeyData = keyData
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.Fulcio != nil {
+			return errors.New(`"fulcio" already specified`)
+		}
+		pr.Fulcio = fulcio
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPath specifies a value for the "rekorPublicKeyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyPath != "" {
+			return errors.New(`"rekorPublicKeyPath" already specified`)
+		}
+		pr.RekorPublicKeyPath = rekorPublicKeyPath
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyData != nil {
+			return errors.New(`"rekorPublicKeyData" already specified`)
+		}
+		pr.RekorPublicKeyData = rekorPublicKeyData
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.SignedIdentity != nil {
+			return errors.New(`"signedIdentity" already specified`)
+		}
+		pr.SignedIdentity = signedIdentity
+		return nil
+	}
+}
+
+// newPRSigstoreSigned is NewPRSigstoreSigned, except it returns the private type.
+func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, error) {
+	res := prSigstoreSigned{
+		prCommon: prCommon{Type: prTypeSigstoreSigned},
+	}
+	for _, o := range options {
+		if err := o(&res); err != nil {
+			return nil, err
+		}
+	}
+
+	keySources := 0
+	if res.KeyPath != "" {
+		keySources++
+	}
+	if res.KeyData != nil {
+		keySources++
+	}
+	if res.Fulcio != nil {
+		keySources++
+	}
+	if keySources != 1 {
+		return nil, InvalidPolicyFormatError("exactly one of keyPath, keyData and fulcio must be specified")
+	}
+
+	if res.RekorPublicKeyPath != "" && res.RekorPublicKeyData != nil {
+		return nil, InvalidPolicyFormatError("rekorPublicKeyPath and rekorPublicKeyData cannot be used simultaneously")
+	}
+	if res.Fulcio != nil && res.RekorPublicKeyPath == "" && res.RekorPublicKeyData == nil {
+		return nil, InvalidPolicyFormatError("At least one of rekorPublicKeyPath and rekorPublicKeyData must be specified if fulcio is used")
+	}
+
+	if res.SignedIdentity == nil {
+		return nil, InvalidPolicyFormatError("signedIdentity not specified")
+	}
+
+	return &res, nil
+}
+
+// NewPRSigstoreSigned returns a new "sigstoreSigned" PolicyRequirement based on options.
+func NewPRSigstoreSigned(options ...PRSigstoreSignedOption) (PolicyRequirement, error) {
+	return newPRSigstoreSigned(options...)
+}
+
+// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath
+func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return NewPRSigstoreSigned(
+		PRSigstoreSignedWithKeyPath(keyPath),
+		PRSigstoreSignedWithSignedIdentity(signedIdentity),
+	)
+}
+
+// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData
+func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return NewPRSigstoreSigned(
+		PRSigstoreSignedWithKeyData(keyData),
+		PRSigstoreSignedWithSignedIdentity(signedIdentity),
+	)
+}
+
+// Compile-time check that prSigstoreSigned implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { + *pr = prSigstoreSigned{} + var tmp prSigstoreSigned + var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool + var fulcio prSigstoreSignedFulcio + var signedIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + switch key { + case "type": + return &tmp.Type + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "fulcio": + gotFulcio = true + return &fulcio + case "rekorPublicKeyPath": + gotRekorPublicKeyPath = true + return &tmp.RekorPublicKeyPath + case "rekorPublicKeyData": + gotRekorPublicKeyData = true + return &tmp.RekorPublicKeyData + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSigstoreSigned { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var opts []PRSigstoreSignedOption + if gotKeyPath { + opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath)) + } + if gotKeyData { + opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData)) + } + if gotFulcio { + opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio)) + } + if gotRekorPublicKeyPath { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath)) + } + if gotRekorPublicKeyData { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData)) + } + opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity)) + + res, err := newPRSigstoreSigned(opts...) 
+ if err != nil { + return err + } + *pr = *res + return nil +} + +// PRSigstoreSignedFulcioOption is a way to pass values to NewPRSigstoreSignedFulcio +type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error + +// PRSigstoreSignedFulcioWithCAPath specifies a value for the "caPath" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.CAPath != "" { + return errors.New(`"caPath" already specified`) + } + f.CAPath = caPath + return nil + } +} + +// PRSigstoreSignedFulcioWithCAData specifies a value for the "caData" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.CAData != nil { + return errors.New(`"caData" already specified`) + } + f.CAData = caData + return nil + } +} + +// PRSigstoreSignedFulcioWithOIDCIssuer specifies a value for the "oidcIssuer" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.OIDCIssuer != "" { + return errors.New(`"oidcIssuer" already specified`) + } + f.OIDCIssuer = oidcIssuer + return nil + } +} + +// PRSigstoreSignedFulcioWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.SubjectEmail != "" { + return errors.New(`"subjectEmail" already specified`) + } + f.SubjectEmail = subjectEmail + return nil + } +} + +// newPRSigstoreSignedFulcio is NewPRSigstoreSignedFulcio, except it returns the private type +func newPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (*prSigstoreSignedFulcio, error) { + res := prSigstoreSignedFulcio{} + for _, o := range options { + if err := o(&res); err != nil { + return nil, err + } + } + + if res.CAPath != "" && res.CAData != nil { + return nil, InvalidPolicyFormatError("caPath and caData cannot be used simultaneously") + } + if res.CAPath == "" && res.CAData == nil { + return nil, InvalidPolicyFormatError("At least one of caPath and caData must be specified") + } + if res.OIDCIssuer == "" { + return nil, InvalidPolicyFormatError("oidcIssuer not specified") + } + if res.SubjectEmail == "" { + return nil, InvalidPolicyFormatError("subjectEmail not specified") + } + + return &res, nil +} + +// NewPRSigstoreSignedFulcio returns a PRSigstoreSignedFulcio based on options. +func NewPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (PRSigstoreSignedFulcio, error) { + return newPRSigstoreSignedFulcio(options...) +} + +// Compile-time check that prSigstoreSignedFulcio implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSignedFulcio)(nil) + +func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error { + *f = prSigstoreSignedFulcio{} + var tmp prSigstoreSignedFulcio + var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false... 
+	if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
+		switch key {
+		case "caPath":
+			gotCAPath = true
+			return &tmp.CAPath
+		case "caData":
+			gotCAData = true
+			return &tmp.CAData
+		case "oidcIssuer":
+			gotOIDCIssuer = true
+			return &tmp.OIDCIssuer
+		case "subjectEmail":
+			gotSubjectEmail = true
+			return &tmp.SubjectEmail
+		default:
+			return nil
+		}
+	}); err != nil {
+		return err
+	}
+
+	var opts []PRSigstoreSignedFulcioOption
+	if gotCAPath {
+		opts = append(opts, PRSigstoreSignedFulcioWithCAPath(tmp.CAPath))
+	}
+	if gotCAData {
+		opts = append(opts, PRSigstoreSignedFulcioWithCAData(tmp.CAData))
+	}
+	if gotOIDCIssuer {
+		opts = append(opts, PRSigstoreSignedFulcioWithOIDCIssuer(tmp.OIDCIssuer))
+	}
+	if gotSubjectEmail {
+		opts = append(opts, PRSigstoreSignedFulcioWithSubjectEmail(tmp.SubjectEmail))
+	}
+
+	res, err := newPRSigstoreSignedFulcio(opts...)
+	if err != nil {
+		return err
+	}
+
+	*f = *res
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
index 2edf8397c2d..533a997b1cb 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -172,10 +172,10 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
 // but it does not necessarily mean that the contents of the signature are
 // consistent with local policy.
 // For example:
-// - Do not use a an existence of an accepted signature to determine whether to run
-//   a container based on this image; use IsRunningImageAllowed instead.
-// - Just because a signature is accepted does not automatically mean the contents of the
-//   signature are authorized to run code as root, or to affect system or cluster configuration.
+//   - Do not use the existence of an accepted signature to determine whether to run
+//     a container based on this image; use IsRunningImageAllowed instead.
+//   - Just because a signature is accepted does not automatically mean the contents of the
+//     signature are authorized to run code as root, or to affect system or cluster configuration.
 func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
 	if err := pc.changeState(pcReady, pcInUse); err != nil {
 		return nil, err
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
index ccf1d80ac8d..dcf5592a8ee 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
@@ -4,6 +4,9 @@ package signature
 
 import (
 	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/x509"
 	"errors"
 	"fmt"
 	"os"
@@ -17,6 +20,100 @@ import (
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
 )
 
+// loadBytesFromDataOrPath ensures there is at most one of ${prefix}Data and ${prefix}Path set,
+// and returns the referenced data, or nil if neither is set.
+func loadBytesFromDataOrPath(prefix string, data []byte, path string) ([]byte, error) { + switch { + case data != nil && path != "": + return nil, fmt.Errorf(`Internal inconsistency: both "%sPath" and "%sData" specified`, prefix, prefix) + case path != "": + d, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return d, nil + case data != nil: + return data, nil + default: // Nothing + return nil, nil + } +} + +// prepareTrustRoot creates a fulcioTrustRoot from the input data. +// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.) +func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) { + caCertBytes, err := loadBytesFromDataOrPath("fulcioCA", f.CAData, f.CAPath) + if err != nil { + return nil, err + } + if caCertBytes == nil { + return nil, errors.New(`Internal inconsistency: Fulcio specified with neither "caPath" nor "caData"`) + } + certs := x509.NewCertPool() + if ok := certs.AppendCertsFromPEM(caCertBytes); !ok { + return nil, errors.New("error loading Fulcio CA certificates") + } + fulcio := fulcioTrustRoot{ + caCertificates: certs, + oidcIssuer: f.OIDCIssuer, + subjectEmail: f.SubjectEmail, + } + if err := fulcio.validate(); err != nil { + return nil, err + } + return &fulcio, nil +} + +// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy +type sigstoreSignedTrustRoot struct { + publicKey crypto.PublicKey + fulcio *fulcioTrustRoot + rekorPublicKey *ecdsa.PublicKey +} + +func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) { + res := sigstoreSignedTrustRoot{} + + publicKeyPEM, err := loadBytesFromDataOrPath("key", pr.KeyData, pr.KeyPath) + if err != nil { + return nil, err + } + if publicKeyPEM != nil { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM) + if err != nil { + return nil, fmt.Errorf("parsing public key: %w", err) + } + res.publicKey = pk + } + + if pr.Fulcio != nil { + f, err := pr.Fulcio.prepareTrustRoot() + if err != nil { + return nil, err + } + res.fulcio = f + } + + rekorPublicKeyPEM, err := loadBytesFromDataOrPath("rekorPublicKey", pr.RekorPublicKeyData, pr.RekorPublicKeyPath) + if err != nil { + return nil, err + } + if rekorPublicKeyPEM != nil { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(rekorPublicKeyPEM) + if err != nil { + return nil, fmt.Errorf("parsing Rekor public key: %w", err) + } + pkECDSA, ok := pk.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("Rekor public key is not using ECDSA") + + } + res.rekorPublicKey = pkECDSA + } + + return &res, nil +} + func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { // We don’t know of a single user of this API, and we might return unexpected values in Signature. // For now, just punt. 
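The prepareTrustRoot plumbing above is driven entirely by fields that arrive via policy.json. Not part of this diff, but a minimal sketch of a policy exercising the new fulcio/rekorPublicKeyPath fields; the registry scope, OIDC issuer, email, and file paths are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	// Hypothetical policy: accept images from one repo namespace only when they
	// carry a sigstore signature whose Fulcio certificate matches this issuer and
	// email, and whose Rekor SET verifies against the given log public key.
	policyJSON := []byte(`{
		"default": [{"type": "reject"}],
		"transports": {
			"docker": {
				"registry.example.com/myorg": [
					{
						"type": "sigstoreSigned",
						"fulcio": {
							"caPath": "/etc/containers/fulcio_v1.crt.pem",
							"oidcIssuer": "https://github.com/login/oauth",
							"subjectEmail": "release@example.com"
						},
						"rekorPublicKeyPath": "/etc/containers/rekor.pub"
					}
				]
			}
		}
	}`)
	// signedIdentity is omitted, so it defaults to "matchRepoDigestOrExact"
	// (see prSigstoreSigned.UnmarshalJSON above); the referenced files are
	// only read later, when the requirement is evaluated.
	policy, err := signature.NewPolicyFromBytes(policyJSON)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed policy with %d transport(s)\n", len(policy.Transports))
}
```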
@@ -24,24 +121,10 @@ func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image
 }
 
 func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) {
-	if pr.KeyPath != "" && pr.KeyData != nil {
-		return sarRejected, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
-	}
 	// FIXME: move this to per-context initialization
-	var publicKeyPEM []byte
-	if pr.KeyData != nil {
-		publicKeyPEM = pr.KeyData
-	} else {
-		d, err := os.ReadFile(pr.KeyPath)
-		if err != nil {
-			return sarRejected, err
-		}
-		publicKeyPEM = d
-	}
-
-	publicKey, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
+	trustRoot, err := pr.prepareTrustRoot()
 	if err != nil {
-		return sarRejected, fmt.Errorf("parsing public key: %w", err)
+		return sarRejected, err
 	}
 
 	untrustedAnnotations := sig.UntrustedAnnotations()
@@ -49,8 +132,66 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
 	if !ok {
 		return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey)
 	}
+	untrustedPayload := sig.UntrustedPayload()
+
+	var publicKey crypto.PublicKey
+	switch {
+	case trustRoot.publicKey != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
+		return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified")
+	case trustRoot.publicKey == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
+		return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified")
 
-	signature, err := internal.VerifySigstorePayload(publicKey, sig.UntrustedPayload(), untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
+	case trustRoot.publicKey != nil:
+		if trustRoot.rekorPublicKey != nil {
+			untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
+			if !ok { // For user convenience; passing an empty []byte to VerifyRekorSET should work.
+				return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
+			}
+			// We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies.
+			// FIXME: We could just generate DER instead of the full PEM text
+			recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(trustRoot.publicKey)
+			if err != nil {
+				// Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail.
+				// (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.)
+				return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err)

+			}
+			// We don’t care about the Rekor timestamp, just about log presence.
+			if _, err := internal.VerifyRekorSET(trustRoot.rekorPublicKey, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload); err != nil {
+				return sarRejected, err
+			}
+		}
+		publicKey = trustRoot.publicKey
+
+	case trustRoot.fulcio != nil:
+		if trustRoot.rekorPublicKey == nil { // newPRSigstoreSigned rejects such combinations.
+			return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key")
+		}
+		untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
+		if !ok { // For user convenience; passing an empty []byte to VerifyRekorSET should correctly reject it anyway.
+			return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
+		}
+		untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey]
+		if !ok { // For user convenience; passing an empty []byte to VerifyRekorSET should correctly reject it anyway.
+			return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey)
+		}
+		var untrustedIntermediateChainBytes []byte
+		if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok {
+			untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain)
+		}
+		pk, err := verifyRekorFulcio(trustRoot.rekorPublicKey, trustRoot.fulcio,
+			[]byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload)
+		if err != nil {
+			return sarRejected, err
+		}
+		publicKey = pk
+	}
+
+	if publicKey == nil {
+		// Coverage: This should never happen, we have already excluded the possibility in the switch above.
+		return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload")
+	}
+	signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
 		ValidateSignedDockerReference: func(ref string) error {
 			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
 				return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go
index 9e837452a7c..96e91a0a9c8 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_types.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
@@ -111,13 +111,24 @@ type prSignedBaseLayer struct {
 type prSigstoreSigned struct {
 	prCommon
 
-	// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath and KeyData must be specified.
+	// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyData, Fulcio must be specified.
 	KeyPath string `json:"keyPath,omitempty"`
-	// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+	// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyData, Fulcio must be specified.
 	KeyData []byte `json:"keyData,omitempty"`
 	// FIXME: Multiple public keys?
-	// FIXME: Support fulcio+rekor as an alternative.
+
+	// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+	// If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well.
+	Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`
+
+	// RekorPublicKeyPath is a pathname to a local file containing a public key of a Rekor server which must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
+	// (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"`
+	// RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
+	// (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"`
 
 	// SignedIdentity specifies what image identity the signature must be claiming about the image.
 	// Defaults to "matchRepoDigestOrExact" if not specified.
@@ -125,6 +136,26 @@ type prSigstoreSigned struct {
 	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
 }
 
+// PRSigstoreSignedFulcio contains Fulcio configuration options for a "sigstoreSigned" PolicyRequirement.
+// This is a public type with a single private implementation.
+type PRSigstoreSignedFulcio interface {
+	// prepareTrustRoot creates a fulcioTrustRoot from the input data.
+	// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
+	prepareTrustRoot() (*fulcioTrustRoot, error)
+}
+
+// prSigstoreSignedFulcio collects Fulcio configuration options for prSigstoreSigned
+type prSigstoreSignedFulcio struct {
+	// CAPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CAPath and CAData must be specified.
+	CAPath string `json:"caPath,omitempty"`
+	// CAData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CAPath and CAData must be specified.
+	CAData []byte `json:"caData,omitempty"`
+	// OIDCIssuer specifies the expected OIDC issuer, recorded by Fulcio into the generated certificates.
+	OIDCIssuer string `json:"oidcIssuer,omitempty"`
+	// SubjectEmail specifies the expected email address of the authenticated OIDC identity, recorded by Fulcio into the generated certificates.
+	SubjectEmail string `json:"subjectEmail,omitempty"`
+}
+
 // PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
 // The type is public, but its implementation is private.
diff --git a/vendor/github.com/containers/image/v5/signature/signer/signer.go b/vendor/github.com/containers/image/v5/signature/signer/signer.go
new file mode 100644
index 00000000000..73ae550aa55
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/signer/signer.go
@@ -0,0 +1,9 @@
+package signer
+
+import "github.com/containers/image/v5/internal/signer"
+
+// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
+// It can only be created from within the containers/image package; it can’t be implemented externally.
+//
+// The owner of a Signer must call Close() when done.
+type Signer = signer.Signer
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
index dbc03ec0a05..0233c4cb86b 100644
--- a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
@@ -10,6 +10,7 @@ import (
 	"errors"
 	"fmt"
 
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
 	"github.com/sigstore/sigstore/pkg/signature"
 	"github.com/theupdateframework/go-tuf/encrypted"
 )
@@ -68,3 +69,31 @@ func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
 		return nil, errors.New("unsupported key type")
 	}
 }
+
+// simplified from sigstore/cosign/pkg/cosign.marshalKeyPair
+// loadPrivateKey always requires encryption, so this always requires a passphrase.
+func marshalKeyPair(privateKey crypto.PrivateKey, publicKey crypto.PublicKey, password []byte) (_privateKey []byte, _publicKey []byte, err error) { + x509Encoded, err := x509.MarshalPKCS8PrivateKey(privateKey) + if err != nil { + return nil, nil, fmt.Errorf("x509 encoding private key: %w", err) + } + + encBytes, err := encrypted.Encrypt(x509Encoded, password) + if err != nil { + return nil, nil, err + } + + // store in PEM format + privBytes := pem.EncodeToMemory(&pem.Block{ + Bytes: encBytes, + Type: sigstorePrivateKeyPemType, + }) + + // Now do the public key + pubBytes, err := cryptoutils.MarshalPublicKeyToPEM(publicKey) + if err != nil { + return nil, nil, err + } + + return privBytes, pubBytes, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/generate.go b/vendor/github.com/containers/image/v5/signature/sigstore/generate.go new file mode 100644 index 00000000000..77520c12327 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/sigstore/generate.go @@ -0,0 +1,35 @@ +package sigstore + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" +) + +// GenerateKeyPairResult is a struct to ensure the private and public parts can not be confused by the caller. +type GenerateKeyPairResult struct { + PublicKey []byte + PrivateKey []byte +} + +// GenerateKeyPair generates a public/private key pair usable for signing images using the sigstore format, +// and returns key representations suitable for storing in long-term files (with the private key encrypted using the provided passphrase). +// The specific key kind (e.g. algorithm, size), as well as the file format, are unspecified by this API, +// and can change with best practices over time. +func GenerateKeyPair(passphrase []byte) (*GenerateKeyPairResult, error) { + // https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#signature-schemes + // only requires ECDSA-P256 to be supported, so that’s what we must use. + rawKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + // Coverage: This can fail only if the randomness source fails + return nil, err + } + private, public, err := marshalKeyPair(rawKey, rawKey.Public(), passphrase) + if err != nil { + return nil, err + } + return &GenerateKeyPairResult{ + PublicKey: public, + PrivateKey: private, + }, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go b/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go new file mode 100644 index 00000000000..c6258f408f2 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go @@ -0,0 +1,95 @@ +package internal + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature/internal" + sigstoreSignature "github.com/sigstore/sigstore/pkg/signature" +) + +type Option func(*SigstoreSigner) error + +// SigstoreSigner is a signer.SignerImplementation implementation for sigstore signatures. +// It is initialized using various closures that implement Option, sadly over several subpackages, to decrease the +// dependency impact. 
+type SigstoreSigner struct { + PrivateKey sigstoreSignature.Signer // May be nil during initialization + SigningKeyOrCert []byte // For possible Rekor upload; always initialized together with PrivateKey + + // Fulcio results to include + FulcioGeneratedCertificate []byte // Or nil + FulcioGeneratedCertificateChain []byte // Or nil + + // Rekor state + RekorUploader func(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) // Or nil +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +func (s *SigstoreSigner) ProgressMessage() string { + return "Signing image using a sigstore signature" +} + +// SignImageManifest creates a new signature for manifest m as dockerReference. +func (s *SigstoreSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) { + if s.PrivateKey == nil { + return nil, errors.New("internal error: nothing to sign with, should have been detected in NewSigner") + } + + if reference.IsNameOnly(dockerReference) { + return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String()) + } + manifestDigest, err := manifest.Digest(m) + if err != nil { + return nil, err + } + // sigstore/cosign completely ignores dockerReference for actual policy decisions. + // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks. + // So, just do what simple signing does, and cosign won’t mind. + payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String()) + payloadBytes, err := json.Marshal(payloadData) + if err != nil { + return nil, err + } + + // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(), + // which seems to be not used by anything. So we don’t bother. 
+ signatureBytes, err := s.PrivateKey.SignMessage(bytes.NewReader(payloadBytes)) + if err != nil { + return nil, fmt.Errorf("creating signature: %w", err) + } + base64Signature := base64.StdEncoding.EncodeToString(signatureBytes) + var rekorSETBytes []byte // = nil + if s.RekorUploader != nil { + set, err := s.RekorUploader(ctx, s.SigningKeyOrCert, signatureBytes, payloadBytes) + if err != nil { + return nil, err + } + rekorSETBytes = set + } + + annotations := map[string]string{ + signature.SigstoreSignatureAnnotationKey: base64Signature, + } + if s.FulcioGeneratedCertificate != nil { + annotations[signature.SigstoreCertificateAnnotationKey] = string(s.FulcioGeneratedCertificate) + } + if s.FulcioGeneratedCertificateChain != nil { + annotations[signature.SigstoreIntermediateCertificateChainAnnotationKey] = string(s.FulcioGeneratedCertificateChain) + } + if rekorSETBytes != nil { + annotations[signature.SigstoreSETAnnotationKey] = string(rekorSETBytes) + } + return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, payloadBytes, annotations), nil +} + +func (s *SigstoreSigner) Close() error { + return nil +} diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/sign.go b/vendor/github.com/containers/image/v5/signature/sigstore/sign.go deleted file mode 100644 index daa6ab387cf..00000000000 --- a/vendor/github.com/containers/image/v5/signature/sigstore/sign.go +++ /dev/null @@ -1,65 +0,0 @@ -package sigstore - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "os" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/signature/internal" - sigstoreSignature "github.com/sigstore/sigstore/pkg/signature" -) - -// SignDockerManifestWithPrivateKeyFileUnstable returns a signature for manifest as the specified dockerReference, -// using a private key and an optional passphrase. -// -// Yes, this returns an internal type, and should currently not be used outside of c/image. -// There is NO COMITTMENT TO STABLE API. -func SignDockerManifestWithPrivateKeyFileUnstable(m []byte, dockerReference reference.Named, privateKeyFile string, passphrase []byte) (signature.Sigstore, error) { - privateKeyPEM, err := os.ReadFile(privateKeyFile) - if err != nil { - return signature.Sigstore{}, fmt.Errorf("reading private key from %s: %w", privateKeyFile, err) - } - signer, err := loadPrivateKey(privateKeyPEM, passphrase) - if err != nil { - return signature.Sigstore{}, fmt.Errorf("initializing private key: %w", err) - } - - return signDockerManifest(m, dockerReference, signer) -} - -func signDockerManifest(m []byte, dockerReference reference.Named, signer sigstoreSignature.Signer) (signature.Sigstore, error) { - if reference.IsNameOnly(dockerReference) { - return signature.Sigstore{}, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String()) - } - manifestDigest, err := manifest.Digest(m) - if err != nil { - return signature.Sigstore{}, err - } - // sigstore/cosign completely ignores dockerReference for actual policy decisions. - // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks. - // So, just do what simple signing does, and cosign won’t mind. 
-	payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String())
-	payloadBytes, err := json.Marshal(payloadData)
-	if err != nil {
-		return signature.Sigstore{}, err
-	}
-
-	// github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(),
-	// which seems to be not used by anything. So we don’t bother.
-	signatureBytes, err := signer.SignMessage(bytes.NewReader(payloadBytes))
-	if err != nil {
-		return signature.Sigstore{}, fmt.Errorf("creating signature: %w", err)
-	}
-	base64Signature := base64.StdEncoding.EncodeToString(signatureBytes)
-
-	return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType,
-		payloadBytes,
-		map[string]string{
-			signature.SigstoreSignatureAnnotationKey: base64Signature,
-		}), nil
-}
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/signer.go b/vendor/github.com/containers/image/v5/signature/sigstore/signer.go
new file mode 100644
index 00000000000..fb825ada9de
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/signer.go
@@ -0,0 +1,60 @@
+package sigstore
+
+import (
+	"errors"
+	"fmt"
+	"os"
+
+	internalSigner "github.com/containers/image/v5/internal/signer"
+	"github.com/containers/image/v5/signature/signer"
+	"github.com/containers/image/v5/signature/sigstore/internal"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+type Option = internal.Option
+
+func WithPrivateKeyFile(file string, passphrase []byte) Option {
+	return func(s *internal.SigstoreSigner) error {
+		if s.PrivateKey != nil {
+			return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
+		}
+
+		if passphrase == nil {
+			return errors.New("private key passphrase not provided")
+		}
+
+		privateKeyPEM, err := os.ReadFile(file)
+		if err != nil {
+			return fmt.Errorf("reading private key from %s: %w", file, err)
+		}
+		signerVerifier, err := loadPrivateKey(privateKeyPEM, passphrase)
+		if err != nil {
+			return fmt.Errorf("initializing private key: %w", err)
+		}
+		publicKey, err := signerVerifier.PublicKey()
+		if err != nil {
+			return fmt.Errorf("getting public key from private key: %w", err)
+		}
+		publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
+		if err != nil {
+			return fmt.Errorf("converting public key to PEM: %w", err)
+		}
+		s.PrivateKey = signerVerifier
+		s.SigningKeyOrCert = publicKeyPEM
+		return nil
+	}
+}
+
+func NewSigner(opts ...Option) (*signer.Signer, error) {
+	s := internal.SigstoreSigner{}
+	for _, o := range opts {
+		if err := o(&s); err != nil {
+			return nil, err
+		}
+	}
+	if s.PrivateKey == nil {
+		return nil, errors.New("no private key source provided (neither a private key nor Fulcio) when preparing to create sigstore signatures")
+	}
+
+	return internalSigner.NewSigner(&s), nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go
index 1ca571e5aa5..e91f46abaca 100644
--- a/vendor/github.com/containers/image/v5/signature/simple.go
+++ b/vendor/github.com/containers/image/v5/signature/simple.go
@@ -72,7 +72,8 @@ func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference s
 	}
 }
 
-// Compile-time check that untrustedSignature implements json.Marshaler
+// A compile-time check that untrustedSignature and *untrustedSignature implement json.Marshaler
+var _ json.Marshaler = untrustedSignature{}
 var _ json.Marshaler = (*untrustedSignature)(nil)
 
 // MarshalJSON implements the json.Marshaler interface.
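Stepping back from the mechanical diff for a moment: the old fixed-arity constructors (NewPRSigstoreSignedKeyPath/NewPRSigstoreSignedKeyData) survive as thin wrappers over the new options-based NewPRSigstoreSigned from policy_config_sigstore.go. A minimal sketch of how calling code might adopt the new API directly; the key-file paths are hypothetical, and this snippet is not part of the vendored code:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	// Equivalent to the old NewPRSigstoreSignedKeyPath(keyPath, identity),
	// plus the newly supported Rekor public key; both paths are made-up examples.
	pr, err := signature.NewPRSigstoreSigned(
		signature.PRSigstoreSignedWithKeyPath("/etc/containers/cosign.pub"),
		signature.PRSigstoreSignedWithRekorPublicKeyPath("/etc/containers/rekor.pub"),
		signature.PRSigstoreSignedWithSignedIdentity(signature.NewPRMMatchRepoDigestOrExact()),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", pr)
}
```

The functional-options shape is what lets a single constructor accept any of keyPath/keyData/fulcio plus the optional Rekor key while rejecting invalid combinations in one place (newPRSigstoreSigned).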
diff --git a/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go b/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go new file mode 100644 index 00000000000..983bbb10b51 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go @@ -0,0 +1,105 @@ +package simplesigning + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + internalSig "github.com/containers/image/v5/internal/signature" + internalSigner "github.com/containers/image/v5/internal/signer" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/signature/signer" +) + +// simpleSigner is a signer.SignerImplementation implementation for simple signing signatures. +type simpleSigner struct { + mech signature.SigningMechanism + keyFingerprint string + passphrase string // "" if not provided. +} + +type Option func(*simpleSigner) error + +// WithKeyFingerprint returns an Option for NewSigner, specifying a key to sign with, using the provided GPG key fingerprint. +func WithKeyFingerprint(keyFingerprint string) Option { + return func(s *simpleSigner) error { + s.keyFingerprint = keyFingerprint + return nil + } +} + +// WithPassphrase returns an Option for NewSigner, specifying a passphrase for the private key. +// If this is not specified, the system may interactively prompt using a gpg-agent / pinentry. +func WithPassphrase(passphrase string) Option { + return func(s *simpleSigner) error { + // The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior. + if strings.Contains(passphrase, "\n") { + return errors.New("invalid passphrase: must not contain a line break") + } + s.passphrase = passphrase + return nil + } +} + +// NewSigner returns a signature.Signer which creates “simple signing” signatures using the user’s default +// GPG configuration ($GNUPGHOME / ~/.gnupg). +// +// The set of options must identify a key to sign with, probably using a WithKeyFingerprint. +// +// The caller must call Close() on the returned Signer. +func NewSigner(opts ...Option) (*signer.Signer, error) { + mech, err := signature.NewGPGSigningMechanism() + if err != nil { + return nil, fmt.Errorf("initializing GPG: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + mech.Close() + } + }() + if err := mech.SupportsSigning(); err != nil { + return nil, fmt.Errorf("Signing not supported: %w", err) + } + + s := simpleSigner{ + mech: mech, + } + for _, o := range opts { + if err := o(&s); err != nil { + return nil, err + } + } + if s.keyFingerprint == "" { + return nil, errors.New("no key identity provided for simple signing") + } + // Ideally, we should look up (and unlock?) the key at this point already, but our current SigningMechanism API does not allow that. + + succeeded = true + return internalSigner.NewSigner(&s), nil +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +func (s *simpleSigner) ProgressMessage() string { + return "Signing image using simple signing" +} + +// SignImageManifest creates a new signature for manifest m as dockerReference. 
+func (s *simpleSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (internalSig.Signature, error) { + if reference.IsNameOnly(dockerReference) { + return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String()) + } + simpleSig, err := signature.SignDockerManifestWithOptions(m, dockerReference.String(), s.mech, s.keyFingerprint, &signature.SignOptions{ + Passphrase: s.passphrase, + }) + if err != nil { + return nil, err + } + return internalSig.SimpleSigningFromBlob(simpleSig), nil +} + +func (s *simpleSigner) Close() error { + return s.mech.Close() +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index d4288dade59..d1affc5e9cb 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -23,7 +23,6 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" @@ -89,14 +88,37 @@ func (s *storageImageSource) Close() error { // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { - if info.Digest == image.GzippedEmptyLayerDigest { + // We need a valid digest value. + digest := info.Digest + err = digest.Validate() + if err != nil { + return nil, 0, err + } + + if digest == image.GzippedEmptyLayerDigest { return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil } + // Check if the blob corresponds to a diff that was used to initialize any layers. Our + // callers should try to retrieve layers using their uncompressed digests, so no need to + // check if they're using one of the compressed digests, which we can't reproduce anyway. + layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest) + + // If it's not a layer, then it must be a data item. + if len(layers) == 0 { + b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, digest.String()) + if err != nil { + return nil, 0, err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", digest.String()) + return io.NopCloser(r), int64(r.Len()), nil + } + // NOTE: the blob is first written to a temporary file and subsequently // closed. The intention is to keep the time we own the storage lock // as short as possible to allow other processes to access the storage. 
- rc, n, _, err = s.getBlobAndLayerID(info) + rc, n, _, err = s.getBlobAndLayerID(digest, layers) if err != nil { return nil, 0, err } @@ -106,53 +128,40 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c if err != nil { return nil, 0, err } + success := false + defer func() { + if !success { + tmpFile.Close() + } + }() + // On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically + // cleaned up on process termination (or if the caller forgets to invoke Close()) + if err := os.Remove(tmpFile.Name()); err != nil { + return nil, 0, err + } if _, err := io.Copy(tmpFile, rc); err != nil { return nil, 0, err } - - if _, err := tmpFile.Seek(0, 0); err != nil { + if _, err := tmpFile.Seek(0, io.SeekStart); err != nil { return nil, 0, err } - wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error { - defer os.Remove(tmpFile.Name()) - return tmpFile.Close() - }) - - return wrapper, n, err + success = true + return tmpFile, n, nil } // getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given. -func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { +func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []storage.Layer) (rc io.ReadCloser, n int64, layerID string, err error) { var layer storage.Layer var diffOptions *storage.DiffOptions - // We need a valid digest value. - err = info.Digest.Validate() - if err != nil { - return nil, -1, "", err - } - // Check if the blob corresponds to a diff that was used to initialize any layers. Our - // callers should try to retrieve layers using their uncompressed digests, so no need to - // check if they're using one of the compressed digests, which we can't reproduce anyway. - layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) - // If it's not a layer, then it must be a data item. - if len(layers) == 0 { - b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String()) - if err != nil { - return nil, -1, "", err - } - r := bytes.NewReader(b) - logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return io.NopCloser(r), int64(r.Len()), "", nil - } // Step through the list of matching layers. Tests may want to verify that if we have multiple layers // which claim to have the same contents, that we actually do have multiple layers, otherwise we could // just go ahead and use the first one every time. 
s.getBlobMutex.Lock() - i := s.layerPosition[info.Digest] - s.layerPosition[info.Digest] = i + 1 + i := s.layerPosition[digest] + s.layerPosition[digest] = i + 1 s.getBlobMutex.Unlock() if len(layers) > 0 { layer = layers[i%len(layers)] @@ -168,7 +177,7 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC } else { n = layer.UncompressedSize } - logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) + logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, digest) rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) if err != nil { return nil, -1, "", err diff --git a/vendor/github.com/containers/image/v5/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go index e9d321b8f87..064c78b1776 100644 --- a/vendor/github.com/containers/image/v5/tarball/doc.go +++ b/vendor/github.com/containers/image/v5/tarball/doc.go @@ -2,6 +2,7 @@ // tarballs and an optional template configuration. // // An example: +// // package main // // import ( diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index dcff8caf76b..33a54566c24 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -125,6 +125,13 @@ type BlobInfo struct { URLs []string Annotations map[string]string MediaType string + + // NOTE: The following fields contain desired _edits_ to blob infos. + // Conceptually they don't belong in the BlobInfo object at all; + // the edits should be provided specifically as parameters to the edit implementation. + // We can’t remove the fields without breaking compatibility, but don’t + // add any more. + // CompressionOperation is used in Image.UpdateLayerInfos to instruct // whether the original layer's "compressed or not" should be preserved, // possibly while changing the compression algorithm from one to another, @@ -144,6 +151,7 @@ type BlobInfo struct { // TODO: To remove together with CompressionOperation in re-design to // remove field out of BlobInfo. CryptoOperation LayerCrypto + // Before adding any fields to this struct, read the NOTE above. } // BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present. @@ -177,24 +185,25 @@ type BICReplacementCandidate struct { // BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies. // // It records two kinds of data: -// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs: -// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest. -// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression), -// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/ // -// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known -// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value). +// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs: +// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression), +// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload). +// +// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known +// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value). // -// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently -// compress/decompress blobs for their own purposes. +// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently +// compress/decompress blobs for their own purposes. // -// - Known blob locations, managed by individual transports: -// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob), -// recording transport-specific information that allows the transport to reuse the blob in the future; -// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused. +// - Known blob locations, managed by individual transports: +// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob), +// recording transport-specific information that allows the transport to reuse the blob in the future; +// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused. // -// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in, the docker/distribution case, blobs -// can be directly reused within a registry, or mounted across registries within a registry server.) +// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs +// can be directly reused within a registry, or mounted across registries within a registry server.) // // None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal; // users of the cache should just fall back to copying the blobs the usual way. @@ -465,7 +474,17 @@ type ImageInspectInfo struct { Variant string Os string Layers []string + LayersData []ImageInspectLayer Env []string + Author string +} + +// ImageInspectLayer is a set of metadata describing an image layer's details +type ImageInspectLayer struct { + MIMEType string // "" if unknown. + Digest digest.Digest + Size int64 // -1 if unknown. + Annotations map[string]string } // DockerAuthConfig contains authorization information for connecting to a registry.
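Aside (hedged sketch, not part of the applied diff): the new LayersData and Author fields extend image inspection with per-layer metadata. A minimal example of consuming LayersData, assuming the struct came from a types.Image Inspect call; printLayers and the sample literal are illustrative only.

package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

// printLayers walks the per-layer metadata newly exposed on ImageInspectInfo.
func printLayers(info *types.ImageInspectInfo) {
	for _, l := range info.LayersData {
		size := "size unknown" // Size is documented as -1 when unknown
		if l.Size >= 0 {
			size = fmt.Sprintf("%d bytes", l.Size)
		}
		fmt.Printf("%s  %s  (%s)\n", l.Digest, l.MIMEType, size)
	}
}

func main() {
	// Illustrative values only; in practice the struct comes from Inspect.
	printLayers(&types.ImageInspectInfo{
		LayersData: []types.ImageInspectLayer{{
			MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip",
			Digest:   "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			Size:     -1,
		}},
	})
}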
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index d69adb54ef3..23c4982e77a 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 22 + VersionMinor = 24 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md index 83b061c70b2..a7d8acbfcf1 100644 --- a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md +++ b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md @@ -1,3 +1,3 @@ ## The libtrust Project Community Code of Conduct -The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md). +The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md index fab2c41e89b..966f4f0533c 100644 --- a/vendor/github.com/containers/libtrust/SECURITY.md +++ b/vendor/github.com/containers/libtrust/SECURITY.md @@ -1,3 +1,3 @@ ## Security and Disclosure Information Policy for the libtrust Project -The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects. +The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects. 
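Aside (illustration, not part of the applied diff): most of the ocicrypt changes below replace github.com/pkg/errors with stdlib error wrapping via fmt.Errorf and the %w verb. A minimal sketch of the equivalence, using a hypothetical readConfig helper; the wrapped cause remains visible to errors.Is and errors.As.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func readConfig(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		// Before: errors.Wrapf(err, "could not read config %q", path)
		// After:  %w keeps the original error in the chain.
		return nil, fmt.Errorf("could not read config %q: %w", path, err)
	}
	return data, nil
}

func main() {
	_, err := readConfig("/nonexistent/ocicrypt.conf")
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: the cause survives wrapping
}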
diff --git a/vendor/github.com/containers/ocicrypt/.gitignore b/vendor/github.com/containers/ocicrypt/.gitignore new file mode 100644 index 00000000000..b25c15b81fa --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/.gitignore @@ -0,0 +1 @@ +*~ diff --git a/vendor/github.com/containers/ocicrypt/.golangci.yml b/vendor/github.com/containers/ocicrypt/.golangci.yml new file mode 100644 index 00000000000..12994baff94 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/.golangci.yml @@ -0,0 +1,31 @@ +linters: + enable: + - depguard + - staticcheck + - unconvert + - gofmt + - goimports + - revive + - ineffassign + - vet + - unused + - misspell + +linters-settings: + depguard: + list-type: denylist + include-go-root: true + packages: + # use "io" or "os" instead + # https://go.dev/doc/go1.16#ioutil + - io/ioutil + + revive: + severity: error + rules: + - name: indent-error-flow + severity: warning + disabled: false + + - name: error-strings + disabled: false diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml deleted file mode 100644 index 1036c8d3f4f..00000000000 --- a/vendor/github.com/containers/ocicrypt/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -dist: bionic -language: go - -os: -- linux - -go: - - "1.13.x" - - "1.16.x" - -matrix: - include: - - os: linux - -addons: - apt: - packages: - - gnutls-bin - - softhsm2 - -go_import_path: github.com/containers/ocicrypt - -install: - - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.46.2 - -script: - - make - - make check - - make test diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go index da403d95dad..0c485d514c9 100644 --- a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go +++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go @@ -17,10 +17,11 @@ package blockcipher import ( + "errors" + "fmt" "io" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // LayerCipherType is the ciphertype as specified in the layer metadata @@ -129,7 +130,7 @@ func (h *LayerBlockCipherHandler) Encrypt(plainDataReader io.Reader, typ LayerCi } return encDataReader, fin, err } - return nil, nil, errors.Errorf("unsupported cipher type: %s", typ) + return nil, nil, fmt.Errorf("unsupported cipher type: %s", typ) } // Decrypt is the handler for the layer decryption routine @@ -138,10 +139,10 @@ func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBloc if typ == "" { return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided") } - if c, ok := h.cipherMap[LayerCipherType(typ)]; ok { + if c, ok := h.cipherMap[typ]; ok { return c.Decrypt(encDataReader, opt) } - return nil, LayerBlockCipherOptions{}, errors.Errorf("unsupported cipher type: %s", typ) + return nil, LayerBlockCipherOptions{}, fmt.Errorf("unsupported cipher type: %s", typ) } // NewLayerBlockCipherHandler returns a new default handler @@ -153,7 +154,7 @@ func NewLayerBlockCipherHandler() (*LayerBlockCipherHandler, error) { var err error h.cipherMap[AES256CTR], err = NewAESCTRLayerBlockCipher(256) if err != nil { - return nil, errors.Wrap(err, "unable to set up Cipher AES-256-CTR") + return nil, fmt.Errorf("unable to set up Cipher AES-256-CTR: %w", err) } return &h, nil diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go 
b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go index 095a53e354d..7db03e2ecda 100644 --- a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go +++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go @@ -22,12 +22,12 @@ import ( "crypto/hmac" "crypto/rand" "crypto/sha256" + "errors" "fmt" "hash" "io" "github.com/containers/ocicrypt/utils" - "github.com/pkg/errors" ) // AESCTRLayerBlockCipher implements the AES CTR stream cipher @@ -74,7 +74,7 @@ func (r *aesctrcryptor) Read(p []byte) (int, error) { if !r.bc.encrypt { if _, err := r.bc.hmac.Write(p[:o]); err != nil { - r.bc.err = errors.Wrapf(err, "could not write to hmac") + r.bc.err = fmt.Errorf("could not write to hmac: %w", err) return 0, r.bc.err } @@ -92,7 +92,7 @@ func (r *aesctrcryptor) Read(p []byte) (int, error) { if r.bc.encrypt { if _, err := r.bc.hmac.Write(p[:o]); err != nil { - r.bc.err = errors.Wrapf(err, "could not write to hmac") + r.bc.err = fmt.Errorf("could not write to hmac: %w", err) return 0, r.bc.err } @@ -120,13 +120,13 @@ func (bc *AESCTRLayerBlockCipher) init(encrypt bool, reader io.Reader, opts Laye if !ok { nonce = make([]byte, aes.BlockSize) if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - return LayerBlockCipherOptions{}, errors.Wrap(err, "unable to generate random nonce") + return LayerBlockCipherOptions{}, fmt.Errorf("unable to generate random nonce: %w", err) } } block, err := aes.NewCipher(key) if err != nil { - return LayerBlockCipherOptions{}, errors.Wrap(err, "aes.NewCipher failed") + return LayerBlockCipherOptions{}, fmt.Errorf("aes.NewCipher failed: %w", err) } bc.reader = reader diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go index c537a20a0a1..f7f29cd8d90 100644 --- a/vendor/github.com/containers/ocicrypt/config/constructors.go +++ b/vendor/github.com/containers/ocicrypt/config/constructors.go @@ -17,10 +17,11 @@ package config import ( - "github.com/containers/ocicrypt/crypto/pkcs11" + "errors" + "fmt" "strings" - "github.com/pkg/errors" + "github.com/containers/ocicrypt/crypto/pkcs11" "gopkg.in/yaml.v3" ) @@ -85,7 +86,7 @@ func EncryptWithPkcs11(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Pubkeys, pkcs11Y } p11confYaml, err := yaml.Marshal(pkcs11Config) if err != nil { - return CryptoConfig{}, errors.Wrapf(err, "Could not marshal Pkcs11Config to Yaml") + return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err) } dc = DecryptConfig{ @@ -223,7 +224,7 @@ func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig func DecryptWithPkcs11Yaml(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Yamls [][]byte) (CryptoConfig, error) { p11confYaml, err := yaml.Marshal(pkcs11Config) if err != nil { - return CryptoConfig{}, errors.Wrapf(err, "Could not marshal Pkcs11Config to Yaml") + return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err) } dc := DecryptConfig{ diff --git a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go index b454b371631..4785a831b51 100644 --- a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go +++ b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go @@ -18,8 +18,7 @@ package config import ( "encoding/json" - "github.com/pkg/errors" - "io/ioutil" + "fmt" "os" ) @@ -52,7 +51,7 @@ func parseConfigFile(filename 
string) (*OcicryptConfig, error) { return nil, nil } - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -72,7 +71,7 @@ func GetConfiguration() (*OcicryptConfig, error) { if len(filename) > 0 { ic, err = parseConfigFile(filename) if err != nil { - return nil, errors.Wrap(err, "Error while parsing keyprovider config file") + return nil, fmt.Errorf("Error while parsing keyprovider config file: %w", err) } } else { return nil, nil diff --git a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go new file mode 100644 index 00000000000..b4f0e4d37cd --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go @@ -0,0 +1,123 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11config + +import ( + "errors" + "fmt" + "os" + "path" + + "github.com/containers/ocicrypt/crypto/pkcs11" + "gopkg.in/yaml.v3" +) + +// OcicryptConfig represents the format of an imgcrypt.conf config file +type OcicryptConfig struct { + Pkcs11Config pkcs11.Pkcs11Config `yaml:"pkcs11"` +} + +const CONFIGFILE = "ocicrypt.conf" +const ENVVARNAME = "OCICRYPT_CONFIG" + +// parseConfigFile parses a configuration file; it is not an error if the configuration file does +// not exist, so no error is returned. 
+// A config file may look like this: +// module-directories: +// - /usr/lib64/pkcs11/ +// - /usr/lib/pkcs11/ +// allowed-module-paths: +// - /usr/lib64/pkcs11/ +// - /usr/lib/pkcs11/ +func parseConfigFile(filename string) (*OcicryptConfig, error) { + // a non-existent config file is not an error + _, err := os.Stat(filename) + if os.IsNotExist(err) { + return nil, nil + } + + data, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + + ic := &OcicryptConfig{} + err = yaml.Unmarshal(data, ic) + return ic, err +} + +// getConfiguration tries to read the configuration file at the following locations +// 1) ${OCICRYPT_CONFIG} == "internal": use internal default allow-all policy +// 2) ${OCICRYPT_CONFIG} +// 3) ${XDG_CONFIG_HOME}/ocicrypt-pkcs11.conf +// 4) ${HOME}/.config/ocicrypt-pkcs11.conf +// 5) /etc/ocicrypt-pkcs11.conf +// If no configuration file could be found or read a null pointer is returned +func getConfiguration() (*OcicryptConfig, error) { + filename := os.Getenv(ENVVARNAME) + if len(filename) > 0 { + if filename == "internal" { + return getDefaultCryptoConfigOpts() + } + ic, err := parseConfigFile(filename) + if err != nil || ic != nil { + return ic, err + } + } + envvar := os.Getenv("XDG_CONFIG_HOME") + if len(envvar) > 0 { + ic, err := parseConfigFile(path.Join(envvar, CONFIGFILE)) + if err != nil || ic != nil { + return ic, err + } + } + envvar = os.Getenv("HOME") + if len(envvar) > 0 { + ic, err := parseConfigFile(path.Join(envvar, ".config", CONFIGFILE)) + if err != nil || ic != nil { + return ic, err + } + } + return parseConfigFile(path.Join("etc", CONFIGFILE)) +} + +// getDefaultCryptoConfigOpts returns default crypto config opts needed for pkcs11 module access +func getDefaultCryptoConfigOpts() (*OcicryptConfig, error) { + mdyaml := pkcs11.GetDefaultModuleDirectoriesYaml("") + config := fmt.Sprintf("module-directories:\n"+ + "%s"+ + "allowed-module-paths:\n"+ + "%s", mdyaml, mdyaml) + p11conf, err := pkcs11.ParsePkcs11ConfigFile([]byte(config)) + return &OcicryptConfig{ + Pkcs11Config: *p11conf, + }, err +} + +// GetUserPkcs11Config gets the user's Pkcs11Config either from a configuration file or if none is +// found the default ones are returned +func GetUserPkcs11Config() (*pkcs11.Pkcs11Config, error) { + fmt.Print("Note: pkcs11 support is currently experimental\n") + ic, err := getConfiguration() + if err != nil { + return &pkcs11.Pkcs11Config{}, err + } + if ic == nil { + return &pkcs11.Pkcs11Config{}, errors.New("No ocicrypt config file was found") + } + return &ic.Pkcs11Config, nil +} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go index c6d47e8300a..473e23ff5df 100644 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go @@ -15,7 +15,7 @@ package pkcs11 import ( "fmt" - "github.com/pkg/errors" + pkcs11uri "github.com/stefanberger/go-pkcs11uri" "gopkg.in/yaml.v3" ) @@ -42,7 +42,7 @@ func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) { p11uri := pkcs11uri.New() err := p11uri.Parse(uri) if err != nil { - return nil, errors.Wrapf(err, "Could not parse Pkcs11URI from file") + return nil, fmt.Errorf("Could not parse Pkcs11URI from file: %w", err) } return p11uri, err } @@ -50,14 +50,14 @@ func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) { // ParsePkcs11KeyFile parses a pkcs11 key file holding a pkcs11 URI describing a private key.
// The file has the following yaml format: // pkcs11: -// - uri : <pkcs11_uri> +// - uri : <pkcs11_uri> // An error is returned if the pkcs11 URI is malformed func ParsePkcs11KeyFile(yamlstr []byte) (*Pkcs11KeyFileObject, error) { p11keyfile := Pkcs11KeyFile{} - err := yaml.Unmarshal([]byte(yamlstr), &p11keyfile) + err := yaml.Unmarshal(yamlstr, &p11keyfile) if err != nil { - return nil, errors.Wrapf(err, "Could not unmarshal pkcs11 keyfile") + return nil, fmt.Errorf("Could not unmarshal pkcs11 keyfile: %w", err) } p11uri, err := ParsePkcs11Uri(p11keyfile.Pkcs11.Uri) @@ -126,9 +126,9 @@ func GetDefaultModuleDirectoriesYaml(indent string) string { func ParsePkcs11ConfigFile(yamlstr []byte) (*Pkcs11Config, error) { p11conf := Pkcs11Config{} - err := yaml.Unmarshal([]byte(yamlstr), &p11conf) + err := yaml.Unmarshal(yamlstr, &p11conf) if err != nil { - return &p11conf, errors.Wrapf(err, "Could not parse Pkcs11Config") + return &p11conf, fmt.Errorf("Could not parse Pkcs11Config: %w", err) } return &p11conf, nil } diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go index 7d80f5f844b..fe047a1e62c 100644 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go @@ -1,3 +1,4 @@ +//go:build cgo // +build cgo /* @@ -25,6 +26,7 @@ import ( "crypto/sha256" "encoding/base64" "encoding/json" + "errors" "fmt" "hash" "net/url" @@ -33,7 +35,6 @@ import ( "strings" "github.com/miekg/pkcs11" - "github.com/pkg/errors" pkcs11uri "github.com/stefanberger/go-pkcs11uri" ) @@ -76,11 +77,11 @@ func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, stri hashfunc = sha256.New() hashalg = "sha256" default: - return nil, "", errors.Errorf("Unsupported OAEP hash '%s'", oaephash) + return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash) } ciphertext, err := rsa.EncryptOAEP(hashfunc, rand.Reader, pubKey, plaintext, OAEPLabel) if err != nil { - return nil, "", errors.Wrapf(err, "rss.EncryptOAEP failed") + return nil, "", fmt.Errorf("rsa.EncryptOAEP failed: %w", err) } return ciphertext, hashalg, nil @@ -104,7 +105,7 @@ func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperatio module, err := p11uri.GetModule() if err != nil { - return "", "", 0, errors.Wrap(err, "No module available in pkcs11 URI") + return "", "", 0, fmt.Errorf("No module available in pkcs11 URI: %w", err) } slotid := int64(-1) @@ -113,7 +114,7 @@ func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperatio if ok { slotid, err = strconv.ParseInt(slot, 10, 64) if err != nil { - return "", "", 0, errors.Wrap(err, "slot-id is not a valid number") + return "", "", 0, fmt.Errorf("slot-id is not a valid number: %w", err) } if slotid < 0 { return "", "", 0, fmt.Errorf("slot-id is a negative number") @@ -138,21 +139,21 @@ func pkcs11UriGetKeyIdAndLabel(p11uri *pkcs11uri.Pkcs11URI) (string, string, err // pkcs11OpenSession opens a session with a pkcs11 device at the given slot and logs in with the given PIN func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkcs11.SessionHandle, err error) { - session, err = p11ctx.OpenSession(uint(slotid), pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION) + session, err = p11ctx.OpenSession(slotid, pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION) if err != nil { - return 0, errors.Wrapf(err, "OpenSession to slot %d failed", slotid) + return 0, fmt.Errorf("OpenSession to
slot %d failed: %w", slotid, err) } if len(pin) > 0 { err = p11ctx.Login(session, pkcs11.CKU_USER, pin) if err != nil { _ = p11ctx.CloseSession(session) - return 0, errors.Wrap(err, "Could not login to device") + return 0, fmt.Errorf("Could not login to device: %w", err) } } return session, nil } -// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (share libary) and to get +// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (shared library) and to get // the PIN to use for login; if the URI contains a slot-id, the given slot-id will be used, otherwise // one slot after the other will be attempted and the first one where login succeeds will be used func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx *pkcs11.Ctx, session pkcs11.SessionHandle, err error) { @@ -170,40 +171,40 @@ func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx if err != nil { p11Err := err.(pkcs11.Error) if p11Err != pkcs11.CKR_CRYPTOKI_ALREADY_INITIALIZED { - return nil, 0, errors.Wrap(err, "Initialize failed") + return nil, 0, fmt.Errorf("Initialize failed: %w", err) } } if slotid >= 0 { session, err := pkcs11OpenSession(p11ctx, uint(slotid), pin) return p11ctx, session, err - } else { - slots, err := p11ctx.GetSlotList(true) - if err != nil { - return nil, 0, errors.Wrap(err, "GetSlotList failed") - } + } - tokenlabel, ok := p11uri.GetPathAttribute("token", false) - if !ok { - return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given") - } + slots, err := p11ctx.GetSlotList(true) + if err != nil { + return nil, 0, fmt.Errorf("GetSlotList failed: %w", err) + } - for _, slot := range slots { - ti, err := p11ctx.GetTokenInfo(slot) - if err != nil || ti.Label != tokenlabel { - continue - } + tokenlabel, ok := p11uri.GetPathAttribute("token", false) + if !ok { + return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given") + } - session, err = pkcs11OpenSession(p11ctx, slot, pin) - if err == nil { - return p11ctx, session, err - } + for _, slot := range slots { + ti, err := p11ctx.GetTokenInfo(slot) + if err != nil || ti.Label != tokenlabel { + continue } - if len(pin) > 0 { - return nil, 0, errors.New("Could not create session to any slot and/or log in") + + session, err = pkcs11OpenSession(p11ctx, slot, pin) + if err == nil { + return p11ctx, session, err } - return nil, 0, errors.New("Could not create session to any slot") } + if len(pin) > 0 { + return nil, 0, errors.New("Could not create session to any slot and/or log in") + } + return nil, 0, errors.New("Could not create session to any slot") } func pkcs11Logout(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) { @@ -233,24 +234,24 @@ func findObject(p11ctx *pkcs11.Ctx, session pkcs11.SessionHandle, class uint, ke } if err := p11ctx.FindObjectsInit(session, template); err != nil { - return 0, errors.Wrap(err, "FindObjectsInit failed") + return 0, fmt.Errorf("FindObjectsInit failed: %w", err) } obj, _, err := p11ctx.FindObjects(session, 100) if err != nil { - return 0, errors.Wrap(err, "FindObjects failed") + return 0, fmt.Errorf("FindObjects failed: %w", err) } if err := p11ctx.FindObjectsFinal(session); err != nil { - return 0, errors.Wrap(err, "FindObjectsFinal failed") + return 0, fmt.Errorf("FindObjectsFinal failed: %w", err) } if len(obj) > 1 { - return 0, errors.Errorf("There are too many (=%d) keys with %s", len(obj), msg) + return 0, fmt.Errorf("There are too many (=%d) keys with %s", len(obj), msg) } else if len(obj) == 
1 { return obj[0], nil } - return 0, errors.Errorf("Could not find any object with %s", msg) + return 0, fmt.Errorf("Could not find any object with %s", msg) } // publicEncryptOAEP uses a public key described by a pkcs11 URI to OAEP encrypt the given plaintext @@ -290,17 +291,17 @@ func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, s oaep = OAEPSha256Params hashalg = "sha256" default: - return nil, "", errors.Errorf("Unsupported OAEP hash '%s'", oaephash) + return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash) } err = p11ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PubKey) if err != nil { - return nil, "", errors.Wrap(err, "EncryptInit error") + return nil, "", fmt.Errorf("EncryptInit error: %w", err) } ciphertext, err := p11ctx.Encrypt(session, plaintext) if err != nil { - return nil, "", errors.Wrap(err, "Encrypt failed") + return nil, "", fmt.Errorf("Encrypt failed: %w", err) } return ciphertext, hashalg, nil } @@ -338,16 +339,16 @@ func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hash case "sha256": oaep = OAEPSha256Params default: - return nil, errors.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg) + return nil, fmt.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg) } err = p11ctx.DecryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PrivKey) if err != nil { - return nil, errors.Wrapf(err, "DecryptInit failed") + return nil, fmt.Errorf("DecryptInit failed: %w", err) } plaintext, err := p11ctx.Decrypt(session, ciphertext) if err != nil { - return nil, errors.Wrapf(err, "Decrypt failed") + return nil, fmt.Errorf("Decrypt failed: %w", err) } return plaintext, err } @@ -373,19 +374,19 @@ type Pkcs11Recipient struct { // may either be *rsa.PublicKey or *pkcs11uri.Pkcs11URI; the returned byte array is a JSON string of the // following format: // { -// recipients: [ // recipient list -// { -// "version": 0, -// "blob": <base64 encoded RSA OAEP encrypted blob>, -// "hash": <hash used for OAEP encryption> -// } , -// { -// "version": 0, -// "blob": <base64 encoded RSA OAEP encrypted blob>, -// "hash": <hash used for OAEP encryption> -// } , -// [...] -// ] +// recipients: [ // recipient list +// { +// "version": 0, +// "blob": <base64 encoded RSA OAEP encrypted blob>, +// "hash": <hash used for OAEP encryption> +// } , +// { +// "version": 0, +// "blob": <base64 encoded RSA OAEP encrypted blob>, +// "hash": <hash used for OAEP encryption> +// } , +// [...] +// ] // } func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { var ( @@ -402,7 +403,7 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { case *Pkcs11KeyFileObject: ciphertext, hashalg, err = publicEncryptOAEP(pkey, data) default: - err = errors.Errorf("Unsupported key object type for pkcs11 public key") + err = fmt.Errorf("Unsupported key object type for pkcs11 public key") } if err != nil { return nil, err @@ -422,33 +423,32 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { // Decrypt tries to decrypt one of the recipients' blobs using a pkcs11 private key. // The input pkcs11blobstr is a string with the following format: // { -// recipients: [ // recipient list -// { -// "version": 0, -// "blob": <base64 encoded RSA OAEP encrypted blob>, -// "hash": <hash used for OAEP encryption> -// } , -// { -// "version": 0, -// "blob": <base64 encoded RSA OAEP encrypted blob>, -// "hash": <hash used for OAEP encryption> -// } , -// [...] +// recipients: [ // recipient list +// { +// "version": 0, +// "blob": <base64 encoded RSA OAEP encrypted blob>, +// "hash": <hash used for OAEP encryption> +// } , +// { +// "version": 0, +// "blob": <base64 encoded RSA OAEP encrypted blob>, +// "hash": <hash used for OAEP encryption> +// } , +// [...] // } // Note: More recent versions of this code explicitly write 'sha1' -// while older versions left it empty in case of 'sha1'. -// +// while older versions left it empty in case of 'sha1'.
func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { pkcs11blob := Pkcs11Blob{} err := json.Unmarshal(pkcs11blobstr, &pkcs11blob) if err != nil { - return nil, errors.Wrapf(err, "Could not parse Pkcs11Blob") + return nil, fmt.Errorf("Could not parse Pkcs11Blob: %w", err) } switch pkcs11blob.Version { case 0: // latest supported version default: - return nil, errors.Errorf("Found Pkcs11Blob with version %d but maximum supported version is 0.", pkcs11blob.Version) + return nil, fmt.Errorf("found Pkcs11Blob with version %d but maximum supported version is 0", pkcs11blob.Version) } // since we do trial and error, collect all encountered errors errs := "" @@ -458,7 +458,7 @@ func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, case 0: // last supported version default: - return nil, errors.Errorf("Found Pkcs11Recipient with version %d but maximum supported version is 0.", recipient.Version) + return nil, fmt.Errorf("found Pkcs11Recipient with version %d but maximum supported version is 0", recipient.Version) } ciphertext, err := base64.StdEncoding.DecodeString(recipient.Blob) @@ -481,5 +481,5 @@ func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, } } - return nil, errors.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs) + return nil, fmt.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs) } diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go index 6edf75269f3..6cf0aa2a9f2 100644 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go @@ -1,3 +1,4 @@ +//go:build !cgo // +build !cgo /* @@ -18,14 +19,12 @@ package pkcs11 -import ( - "github.com/pkg/errors" -) +import "fmt" func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { - return nil, errors.Errorf("ocicrypt pkcs11 not supported on this build") + return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build") } func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { - return nil, errors.Errorf("ocicrypt pkcs11 not supported on this build") + return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build") } diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go index 306e372d5f4..391b98bda33 100644 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go @@ -17,12 +17,11 @@ package pkcs11 import ( + "fmt" "os" "runtime" "strings" "sync" - - "github.com/pkg/errors" ) var ( @@ -45,7 +44,7 @@ func setEnvVars(env map[string]string) ([]string, error) { err := os.Setenv(k, v) if err != nil { restoreEnv(oldenv) - return nil, errors.Wrapf(err, "Could not set environment variable '%s' to '%s'", k, v) + return nil, fmt.Errorf("Could not set environment variable '%s' to '%s': %w", k, v, err) } } diff --git a/vendor/github.com/containers/ocicrypt/encryption.go b/vendor/github.com/containers/ocicrypt/encryption.go index f5142cc8d06..b6fa9db40ec 100644 --- a/vendor/github.com/containers/ocicrypt/encryption.go +++ b/vendor/github.com/containers/ocicrypt/encryption.go @@ -19,23 +19,23 @@ package ocicrypt import ( "encoding/base64" "encoding/json" + "errors" "fmt" - keyproviderconfig 
"github.com/containers/ocicrypt/config/keyprovider-config" - "github.com/containers/ocicrypt/keywrap/keyprovider" "io" "strings" "github.com/containers/ocicrypt/blockcipher" "github.com/containers/ocicrypt/config" + keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" "github.com/containers/ocicrypt/keywrap" "github.com/containers/ocicrypt/keywrap/jwe" + "github.com/containers/ocicrypt/keywrap/keyprovider" "github.com/containers/ocicrypt/keywrap/pgp" "github.com/containers/ocicrypt/keywrap/pkcs11" "github.com/containers/ocicrypt/keywrap/pkcs7" "github.com/opencontainers/go-digest" - log "github.com/sirupsen/logrus" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" + log "github.com/sirupsen/logrus" ) // EncryptLayerFinalizer is a finalizer run to return the annotations to set for @@ -133,16 +133,19 @@ func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, des } privOptsData, err = json.Marshal(opts.Private) if err != nil { - return nil, errors.Wrapf(err, "could not JSON marshal opts") + return nil, fmt.Errorf("could not JSON marshal opts: %w", err) } pubOptsData, err = json.Marshal(opts.Public) if err != nil { - return nil, errors.Wrapf(err, "could not JSON marshal opts") + return nil, fmt.Errorf("could not JSON marshal opts: %w", err) } } newAnnotations := make(map[string]string) keysWrapped := false + if len(keyWrapperAnnotations) == 0 { + return nil, errors.New("missing Annotations needed for decryption") + } for annotationsID, scheme := range keyWrapperAnnotations { b64Annotations := desc.Annotations[annotationsID] keywrapper := GetKeyWrapper(scheme) @@ -211,6 +214,9 @@ func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocisp func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor) ([]byte, error) { privKeyGiven := false errs := "" + if len(keyWrapperAnnotations) == 0 { + return nil, errors.New("missing Annotations needed for decryption") + } for annotationsID, scheme := range keyWrapperAnnotations { b64Annotation := desc.Annotations[annotationsID] if b64Annotation != "" { @@ -237,9 +243,9 @@ func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor) } } if !privKeyGiven { - return nil, errors.New("missing private key needed for decryption") + return nil, fmt.Errorf("missing private key needed for decryption:\n%s", errs) } - return nil, errors.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs) + return nil, fmt.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs) } func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) { @@ -270,7 +276,7 @@ func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64An } return optsData, nil } - return nil, errors.Errorf("no suitable key found for decrypting layer key:\n%s", errs) + return nil, fmt.Errorf("no suitable key found for decrypting layer key:\n%s", errs) } // commonEncryptLayer is a function to encrypt the plain layer using a new random @@ -305,7 +311,7 @@ func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsDa privOpts := blockcipher.PrivateLayerBlockCipherOptions{} err := json.Unmarshal(privOptsData, &privOpts) if err != nil { - return nil, "", errors.Wrapf(err, "could not JSON unmarshal privOptsData") + return nil, "", fmt.Errorf("could not JSON unmarshal privOptsData: %w", err) } lbch, err := 
blockcipher.NewLayerBlockCipherHandler() @@ -317,7 +323,7 @@ func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsDa if len(pubOptsData) > 0 { err := json.Unmarshal(pubOptsData, &pubOpts) if err != nil { - return nil, "", errors.Wrapf(err, "could not JSON unmarshal pubOptsData") + return nil, "", fmt.Errorf("could not JSON unmarshal pubOptsData: %w", err) } } diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go index b9d55539a11..3912e82dca0 100644 --- a/vendor/github.com/containers/ocicrypt/gpg.go +++ b/vendor/github.com/containers/ocicrypt/gpg.go @@ -17,16 +17,17 @@ package ocicrypt import ( + "errors" "fmt" - "io/ioutil" + "io" "os" "os/exec" "regexp" "strconv" "strings" + "sync" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "golang.org/x/term" ) @@ -132,7 +133,7 @@ func (gc *gpgv2Client) GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte rfile, wfile, err := os.Pipe() if err != nil { - return nil, errors.Wrapf(err, "could not create pipe") + return nil, fmt.Errorf("could not create pipe: %w", err) } defer func() { rfile.Close() @@ -272,8 +273,8 @@ func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) { return nil, err } - stdoutstr, err2 := ioutil.ReadAll(stdout) - stderrstr, _ := ioutil.ReadAll(stderr) + stdoutstr, err2 := io.ReadAll(stdout) + stderrstr, _ := io.ReadAll(stderr) if err := cmd.Wait(); err != nil { return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr)) @@ -310,9 +311,15 @@ func resolveRecipients(gc GPGClient, recipients []string) []string { return result } -var emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`) +var ( + onceRegexp sync.Once + emailPattern *regexp.Regexp +) func extractEmailFromDetails(details []byte) string { + onceRegexp.Do(func() { + emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`) + }) loc := emailPattern.FindSubmatchIndex(details) if len(loc) == 0 { return "" @@ -352,7 +359,7 @@ func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault } keywrapper := GetKeyWrapper(scheme) if keywrapper == nil { - return nil, nil, errors.Errorf("could not get KeyWrapper for %s\n", scheme) + return nil, nil, fmt.Errorf("could not get KeyWrapper for %s", scheme) } keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets) if err != nil { @@ -411,7 +418,7 @@ func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault if !found && len(b64pgpPackets) > 0 && mustFindKey { ids := uint64ToStringArray("0x%x", keyIds) - return nil, nil, errors.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", ")) + return nil, nil, fmt.Errorf("missing key for decryption of layer %x of %s.
Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", ")) } } } diff --git a/vendor/github.com/containers/ocicrypt/gpgvault.go b/vendor/github.com/containers/ocicrypt/gpgvault.go index dd9a10007c8..f1bd0d989a8 100644 --- a/vendor/github.com/containers/ocicrypt/gpgvault.go +++ b/vendor/github.com/containers/ocicrypt/gpgvault.go @@ -18,9 +18,9 @@ package ocicrypt import ( "bytes" - "io/ioutil" + "fmt" + "os" - "github.com/pkg/errors" "golang.org/x/crypto/openpgp" "golang.org/x/crypto/openpgp/packet" ) @@ -55,7 +55,7 @@ func (g *gpgVault) AddSecretKeyRingData(gpgSecretKeyRingData []byte) error { r := bytes.NewReader(gpgSecretKeyRingData) entityList, err := openpgp.ReadKeyRing(r) if err != nil { - return errors.Wrapf(err, "could not read keyring") + return fmt.Errorf("could not read keyring: %w", err) } g.entityLists = append(g.entityLists, entityList) g.keyDataList = append(g.keyDataList, gpgSecretKeyRingData) @@ -76,7 +76,7 @@ func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) // AddSecretKeyRingFiles adds the secret key rings given their filenames func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error { for _, filename := range filenames { - gpgSecretKeyRingData, err := ioutil.ReadFile(filename) + gpgSecretKeyRingData, err := os.ReadFile(filename) if err != nil { return err } diff --git a/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go b/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go new file mode 100644 index 00000000000..18f4fa925cc --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go @@ -0,0 +1,377 @@ +package helpers + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" + + "github.com/containers/ocicrypt" + encconfig "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/config/pkcs11config" + "github.com/containers/ocicrypt/crypto/pkcs11" + encutils "github.com/containers/ocicrypt/utils" +) + +// processRecipientKeys sorts the array of recipients by type. 
Recipients may be either +// x509 certificates, public keys, or PGP public keys identified by email address or name +func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) { + var ( + gpgRecipients [][]byte + pubkeys [][]byte + x509s [][]byte + pkcs11Pubkeys [][]byte + pkcs11Yamls [][]byte + keyProviders [][]byte + ) + + for _, recipient := range recipients { + + idx := strings.Index(recipient, ":") + if idx < 0 { + return nil, nil, nil, nil, nil, nil, errors.New("Invalid recipient format") + } + + protocol := recipient[:idx] + value := recipient[idx+1:] + + switch protocol { + case "pgp": + gpgRecipients = append(gpgRecipients, []byte(value)) + + case "jwe": + tmp, err := os.ReadFile(value) + if err != nil { + return nil, nil, nil, nil, nil, nil, fmt.Errorf("Unable to read file: %w", err) + } + if !encutils.IsPublicKey(tmp) { + return nil, nil, nil, nil, nil, nil, errors.New("File provided is not a public key") + } + pubkeys = append(pubkeys, tmp) + + case "pkcs7": + tmp, err := os.ReadFile(value) + if err != nil { + return nil, nil, nil, nil, nil, nil, fmt.Errorf("Unable to read file: %w", err) + } + if !encutils.IsCertificate(tmp) { + return nil, nil, nil, nil, nil, nil, errors.New("File provided is not an x509 cert") + } + x509s = append(x509s, tmp) + + case "pkcs11": + tmp, err := os.ReadFile(value) + if err != nil { + return nil, nil, nil, nil, nil, nil, fmt.Errorf("Unable to read file: %w", err) + } + if encutils.IsPkcs11PublicKey(tmp) { + pkcs11Yamls = append(pkcs11Yamls, tmp) + } else if encutils.IsPublicKey(tmp) { + pkcs11Pubkeys = append(pkcs11Pubkeys, tmp) + } else { + return nil, nil, nil, nil, nil, nil, errors.New("Provided file is not a public key") + } + + case "provider": + keyProviders = append(keyProviders, []byte(value)) + + default: + return nil, nil, nil, nil, nil, nil, errors.New("Provided protocol not recognized") + } + } + return gpgRecipients, pubkeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProviders, nil +} + +// processx509Certs processes x509 certificate files +func processx509Certs(keys []string) ([][]byte, error) { + var x509s [][]byte + for _, key := range keys { + fileName := strings.Split(key, ":")[0] + if _, err := os.Stat(fileName); os.IsNotExist(err) { + continue + } + tmp, err := os.ReadFile(fileName) + if err != nil { + return nil, fmt.Errorf("Unable to read file: %w", err) + } + if !encutils.IsCertificate(tmp) { + continue + } + x509s = append(x509s, tmp) + + } + return x509s, nil +} + +// processPwdString processes a password that may be in any of the following formats: +// - file=<passwordfile> +// - pass=<password> +// - fd=<filedescriptor> +// - <password> +func processPwdString(pwdString string) ([]byte, error) { + if strings.HasPrefix(pwdString, "file=") { + return os.ReadFile(pwdString[5:]) + } else if strings.HasPrefix(pwdString, "pass=") { + return []byte(pwdString[5:]), nil + } else if strings.HasPrefix(pwdString, "fd=") { + fdStr := pwdString[3:] + fd, err := strconv.Atoi(fdStr) + if err != nil { + return nil, fmt.Errorf("could not parse file descriptor %s: %w", fdStr, err) + } + f := os.NewFile(uintptr(fd), "pwdfile") + if f == nil { + return nil, fmt.Errorf("%s is not a valid file descriptor", fdStr) + } + defer f.Close() + pwd := make([]byte, 64) + n, err := f.Read(pwd) + if err != nil { + return nil, fmt.Errorf("could not read from file descriptor: %w", err) + } + return pwd[:n], nil + } + return []byte(pwdString), nil +} + +// processPrivateKeyFiles sorts the different types of private key files; private key files may either be
+// private keys or GPG private key ring files. The private key files may include the password for the +// private key and take any of the following forms: +// - <filename> +// - <filename>:file=<passwordfile> +// - <filename>:pass=<password> +// - <filename>:fd=<filedescriptor> +// - <filename>:<password> +// - keyprovider:<...> +func processPrivateKeyFiles(keyFilesAndPwds []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) { + var ( + gpgSecretKeyRingFiles [][]byte + gpgSecretKeyPasswords [][]byte + privkeys [][]byte + privkeysPasswords [][]byte + pkcs11Yamls [][]byte + keyProviders [][]byte + err error + ) + // keys needed for decryption in case of adding a recipient + for _, keyfileAndPwd := range keyFilesAndPwds { + var password []byte + + // treat "provider" protocol separately + if strings.HasPrefix(keyfileAndPwd, "provider:") { + keyProviders = append(keyProviders, []byte(keyfileAndPwd[len("provider:"):])) + continue + } + parts := strings.Split(keyfileAndPwd, ":") + if len(parts) == 2 { + password, err = processPwdString(parts[1]) + if err != nil { + return nil, nil, nil, nil, nil, nil, err + } + } + + keyfile := parts[0] + tmp, err := os.ReadFile(keyfile) + if err != nil { + return nil, nil, nil, nil, nil, nil, err + } + isPrivKey, err := encutils.IsPrivateKey(tmp, password) + if encutils.IsPasswordError(err) { + return nil, nil, nil, nil, nil, nil, err + } + + if encutils.IsPkcs11PrivateKey(tmp) { + pkcs11Yamls = append(pkcs11Yamls, tmp) + } else if isPrivKey { + privkeys = append(privkeys, tmp) + privkeysPasswords = append(privkeysPasswords, password) + } else if encutils.IsGPGPrivateKeyRing(tmp) { + gpgSecretKeyRingFiles = append(gpgSecretKeyRingFiles, tmp) + gpgSecretKeyPasswords = append(gpgSecretKeyPasswords, password) + } else { + // ignore if file is not recognized, so as not to error if additional + // metadata/cert files exist + continue + } + } + return gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privkeys, privkeysPasswords, pkcs11Yamls, keyProviders, nil +} + +// CreateDecryptCryptoConfig creates the CryptoConfig object that contains the necessary +// information to perform decryption from command line options. +func CreateDecryptCryptoConfig(keys []string, decRecipients []string) (encconfig.CryptoConfig, error) { + ccs := []encconfig.CryptoConfig{} + + // x509 cert is needed for PKCS7 decryption + _, _, x509s, _, _, _, err := processRecipientKeys(decRecipients) + if err != nil { + return encconfig.CryptoConfig{}, err + } + + // x509 certs can also be passed in via keys + x509FromKeys, err := processx509Certs(keys) + if err != nil { + return encconfig.CryptoConfig{}, err + } + x509s = append(x509s, x509FromKeys...) + + gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privKeys, privKeysPasswords, pkcs11Yamls, keyProviders, err := processPrivateKeyFiles(keys) + if err != nil { + return encconfig.CryptoConfig{}, err + } + + if len(gpgSecretKeyRingFiles) > 0 { + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + } + + /* TODO: Add in GPG client query for secret keys in the future.
+ _, err = createGPGClient(context) + gpgInstalled := err == nil + if gpgInstalled { + if len(gpgSecretKeyRingFiles) == 0 && len(privKeys) == 0 && len(pkcs11Yamls) == 0 && len(keyProviders) == 0 && descs != nil { + // Get pgp private keys from keyring only if no private key was passed + gpgPrivKeys, gpgPrivKeyPasswords, err := getGPGPrivateKeys(context, gpgSecretKeyRingFiles, descs, true) + if err != nil { + return encconfig.CryptoConfig{}, err + } + + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + + } else if len(gpgSecretKeyRingFiles) > 0 { + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + + } + } + */ + + if len(x509s) > 0 { + x509sCc, err := encconfig.DecryptWithX509s(x509s) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, x509sCc) + } + if len(privKeys) > 0 { + privKeysCc, err := encconfig.DecryptWithPrivKeys(privKeys, privKeysPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, privKeysCc) + } + if len(pkcs11Yamls) > 0 { + p11conf, err := pkcs11config.GetUserPkcs11Config() + if err != nil { + return encconfig.CryptoConfig{}, err + } + pkcs11PrivKeysCc, err := encconfig.DecryptWithPkcs11Yaml(p11conf, pkcs11Yamls) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, pkcs11PrivKeysCc) + } + if len(keyProviders) > 0 { + keyProviderCc, err := encconfig.DecryptWithKeyProvider(keyProviders) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, keyProviderCc) + } + return encconfig.CombineCryptoConfigs(ccs), nil +} + +// CreateCryptoConfig from the list of recipient strings and list of key paths of private keys +func CreateCryptoConfig(recipients []string, keys []string) (encconfig.CryptoConfig, error) { + var decryptCc *encconfig.CryptoConfig + ccs := []encconfig.CryptoConfig{} + if len(keys) > 0 { + dcc, err := CreateDecryptCryptoConfig(keys, []string{}) + if err != nil { + return encconfig.CryptoConfig{}, err + } + decryptCc = &dcc + ccs = append(ccs, dcc) + } + + if len(recipients) > 0 { + gpgRecipients, pubKeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProvider, err := processRecipientKeys(recipients) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs := []encconfig.CryptoConfig{} + + // Create GPG client with guessed GPG version and default homedir + gpgClient, err := ocicrypt.NewGPGClient("", "") + gpgInstalled := err == nil + if len(gpgRecipients) > 0 && gpgInstalled { + gpgPubRingFile, err := gpgClient.ReadGPGPubRingFile() + if err != nil { + return encconfig.CryptoConfig{}, err + } + + gpgCc, err := encconfig.EncryptWithGpg(gpgRecipients, gpgPubRingFile) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, gpgCc) + } + + // Create Encryption Crypto Config + if len(x509s) > 0 { + pkcs7Cc, err := encconfig.EncryptWithPkcs7(x509s) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, pkcs7Cc) + } + if len(pubKeys) > 0 { + jweCc, err := encconfig.EncryptWithJwe(pubKeys) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, jweCc) + } + var p11conf *pkcs11.Pkcs11Config + if len(pkcs11Yamls) > 0 || len(pkcs11Pubkeys) > 0 { + p11conf, err = 
pkcs11config.GetUserPkcs11Config() + if err != nil { + return encconfig.CryptoConfig{}, err + } + pkcs11Cc, err := encconfig.EncryptWithPkcs11(p11conf, pkcs11Pubkeys, pkcs11Yamls) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, pkcs11Cc) + } + + if len(keyProvider) > 0 { + keyProviderCc, err := encconfig.EncryptWithKeyProvider(keyProvider) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, keyProviderCc) + } + ecc := encconfig.CombineCryptoConfigs(encryptCcs) + if decryptCc != nil { + ecc.EncryptConfig.AttachDecryptConfig(decryptCc.DecryptConfig) + } + ccs = append(ccs, ecc) + } + + if len(ccs) > 0 { + return encconfig.CombineCryptoConfigs(ccs), nil + } + return encconfig.CryptoConfig{}, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go index 41d0f1b3adf..9d1fe206d61 100644 --- a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go +++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go @@ -18,11 +18,12 @@ package jwe import ( "crypto/ecdsa" + "errors" + "fmt" "github.com/containers/ocicrypt/config" "github.com/containers/ocicrypt/keywrap" "github.com/containers/ocicrypt/utils" - "github.com/pkg/errors" jose "gopkg.in/square/go-jose.v2" ) @@ -54,11 +55,11 @@ func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([] encrypter, err := jose.NewMultiEncrypter(jose.A256GCM, joseRecipients, nil) if err != nil { - return nil, errors.Wrapf(err, "jose.NewMultiEncrypter failed") + return nil, fmt.Errorf("jose.NewMultiEncrypter failed: %w", err) } jwe, err := encrypter.Encrypt(optsData) if err != nil { - return nil, errors.Wrapf(err, "JWE Encrypt failed") + return nil, fmt.Errorf("JWE Encrypt failed: %w", err) } return []byte(jwe.FullSerialize()), nil } diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go index 3b4c47ed4f4..ddb244a8058 100644 --- a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go +++ b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go @@ -19,12 +19,14 @@ package keyprovider import ( "context" "encoding/json" + "errors" + "fmt" + "github.com/containers/ocicrypt/config" keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" "github.com/containers/ocicrypt/keywrap" "github.com/containers/ocicrypt/utils" keyproviderpb "github.com/containers/ocicrypt/utils/keyprovider" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "google.golang.org/grpc" ) @@ -112,13 +114,13 @@ func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []b if kw.attrs.Command != nil { protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) if err != nil { - return nil, errors.Wrap(err, "error while retrieving keyprovider protocol command output") + return nil, fmt.Errorf("error while retrieving keyprovider protocol command output: %w", err) } return protocolOuput.KeyWrapResults.Annotation, nil } else if kw.attrs.Grpc != "" { protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyWrap) if err != nil { - return nil, errors.Wrap(err, "error while retrieving keyprovider protocol grpc output") + return nil, fmt.Errorf("error while retrieving keyprovider protocol grpc output: %w", err) } return protocolOuput.KeyWrapResults.Annotation, nil @@ 
-170,7 +172,7 @@ func getProviderGRPCOutput(input []byte, connString string, operation KeyProvide var grpcOutput *keyproviderpb.KeyProviderKeyWrapProtocolOutput cc, err := grpc.Dial(connString, grpc.WithInsecure()) if err != nil { - return nil, errors.Wrap(err, "error while dialing rpc server") + return nil, fmt.Errorf("error while dialing rpc server: %w", err) } defer func() { derr := cc.Close() @@ -187,12 +189,12 @@ func getProviderGRPCOutput(input []byte, connString string, operation KeyProvide if operation == OpKeyWrap { grpcOutput, err = client.WrapKey(context.Background(), req) if err != nil { - return nil, errors.Wrap(err, "Error from grpc method") + return nil, fmt.Errorf("Error from grpc method: %w", err) } } else if operation == OpKeyUnwrap { grpcOutput, err = client.UnWrapKey(context.Background(), req) if err != nil { - return nil, errors.Wrap(err, "Error from grpc method") + return nil, fmt.Errorf("Error from grpc method: %w", err) } } else { return nil, errors.New("Unsupported operation") @@ -201,7 +203,7 @@ func getProviderGRPCOutput(input []byte, connString string, operation KeyProvide respBytes := grpcOutput.GetKeyProviderKeyWrapProtocolOutput() err = json.Unmarshal(respBytes, &protocolOuput) if err != nil { - return nil, errors.Wrap(err, "Error while unmarshalling grpc method output") + return nil, fmt.Errorf("Error while unmarshalling grpc method output: %w", err) } return &protocolOuput, nil @@ -216,7 +218,7 @@ func getProviderCommandOutput(input []byte, command *keyproviderconfig.Command) } err = json.Unmarshal(respBytes, &protocolOuput) if err != nil { - return nil, errors.Wrap(err, "Error while unmarshalling binary executable command output") + return nil, fmt.Errorf("Error while unmarshalling binary executable command output: %w", err) } return &protocolOuput, nil } diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go index 275a3d8b993..4ab9bd9783b 100644 --- a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go +++ b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go @@ -21,16 +21,15 @@ import ( "crypto" "crypto/rand" "encoding/base64" + "errors" "fmt" "io" - "io/ioutil" "net/mail" "strconv" "strings" "github.com/containers/ocicrypt/config" "github.com/containers/ocicrypt/keywrap" - "github.com/pkg/errors" "golang.org/x/crypto/openpgp" "golang.org/x/crypto/openpgp/packet" ) @@ -64,7 +63,7 @@ func (kw *gpgKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([] ciphertext := new(bytes.Buffer) el, err := kw.createEntityList(ec) if err != nil { - return nil, errors.Wrap(err, "unable to create entity list") + return nil, fmt.Errorf("unable to create entity list: %w", err) } if len(el) == 0 { // nothing to do -- not an error @@ -100,7 +99,7 @@ func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) ( r := bytes.NewBuffer(pgpPrivateKey) entityList, err := openpgp.ReadKeyRing(r) if err != nil { - return nil, errors.Wrap(err, "unable to parse private keys") + return nil, fmt.Errorf("unable to parse private keys: %w", err) } var prompt openpgp.PromptFunction @@ -126,7 +125,7 @@ func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) ( continue } // we get the plain key options back - optsData, err := ioutil.ReadAll(md.UnverifiedBody) + optsData, err := io.ReadAll(md.UnverifiedBody) if err != nil { continue } @@ -142,7 +141,7 @@ func (kw *gpgKeyWrapper) GetKeyIdsFromPacket(b64pgpPackets 
string) ([]uint64, er
 	for _, b64pgpPacket := range strings.Split(b64pgpPackets, ",") {
 		pgpPacket, err := base64.StdEncoding.DecodeString(b64pgpPacket)
 		if err != nil {
-			return nil, errors.Wrapf(err, "could not decode base64 encoded PGP packet")
+			return nil, fmt.Errorf("could not decode base64 encoded PGP packet: %w", err)
 		}
 		newids, err := kw.getKeyIDs(pgpPacket)
 		if err != nil {
@@ -166,7 +165,7 @@ ParsePackets:
 			break ParsePackets
 		}
 		if err != nil {
-			return []uint64{}, errors.Wrapf(err, "packets.Next() failed")
+			return []uint64{}, fmt.Errorf("packets.Next() failed: %w", err)
 		}
 		switch p := p.(type) {
 		case *packet.EncryptedKey:
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
index 803b90865bc..236764d2338 100644
--- a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
@@ -17,12 +17,13 @@ package pkcs11
 import (
+	"errors"
+	"fmt"
+
 	"github.com/containers/ocicrypt/config"
 	"github.com/containers/ocicrypt/crypto/pkcs11"
 	"github.com/containers/ocicrypt/keywrap"
 	"github.com/containers/ocicrypt/utils"
-
-	"github.com/pkg/errors"
 )
 type pkcs11KeyWrapper struct {
@@ -51,7 +52,7 @@ func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte)
 	jsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData)
 	if err != nil {
-		return nil, errors.Wrapf(err, "PKCS11 EncryptMulitple failed")
+		return nil, fmt.Errorf("PKCS11 EncryptMultiple failed: %w", err)
 	}
 	return jsonString, nil
 }
@@ -91,7 +92,7 @@ func (kw *pkcs11KeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byt
 		return plaintext, nil
 	}
-	return nil, errors.Wrapf(err, "PKCS11: No suitable private key found for decryption")
+	return nil, fmt.Errorf("PKCS11: No suitable private key found for decryption: %w", err)
 }
 func (kw *pkcs11KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool {
@@ -139,7 +140,7 @@ func addPubKeys(dc *config.DecryptConfig, pubKeys [][]byte) ([]interface{}, erro
 	return pkcs11Keys, nil
 }
-func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error){
+func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error) {
 	if _, ok := dcparameters["pkcs11-config"]; ok {
 		return pkcs11.ParsePkcs11ConfigFile(dcparameters["pkcs11-config"][0])
 	}
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
index 1feae462bd7..603925dfea7 100644
--- a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
@@ -19,11 +19,12 @@ package pkcs7
 import (
 	"crypto"
 	"crypto/x509"
+	"errors"
+	"fmt"
 	"github.com/containers/ocicrypt/config"
 	"github.com/containers/ocicrypt/keywrap"
 	"github.com/containers/ocicrypt/utils"
-	"github.com/pkg/errors"
 	"go.mozilla.org/pkcs7"
 )
@@ -104,7 +105,7 @@ func (kw *pkcs7KeyWrapper) UnwrapKey(dc *config.DecryptConfig, pkcs7Packet []byt
 	p7, err := pkcs7.Parse(pkcs7Packet)
 	if err != nil {
-		return nil, errors.Wrapf(err, "could not parse PKCS7 packet")
+		return nil, fmt.Errorf("could not parse PKCS7 packet: %w", err)
 	}
 	for idx, privKey := range privKeys {
diff --git a/vendor/github.com/containers/ocicrypt/spec/spec.go b/vendor/github.com/containers/ocicrypt/spec/spec.go
index 330069d491d..8665f6f21c4 100644
--- a/vendor/github.com/containers/ocicrypt/spec/spec.go
+++ b/vendor/github.com/containers/ocicrypt/spec/spec.go
@@ -3,10 +3,14 @@ package spec
 const (
 	// MediaTypeLayerEnc is MIME type used for encrypted layers.
 	MediaTypeLayerEnc = "application/vnd.oci.image.layer.v1.tar+encrypted"
-	// MediaTypeLayerGzipEnc is MIME type used for encrypted compressed layers.
+	// MediaTypeLayerGzipEnc is MIME type used for encrypted gzip-compressed layers.
 	MediaTypeLayerGzipEnc = "application/vnd.oci.image.layer.v1.tar+gzip+encrypted"
+	// MediaTypeLayerZstdEnc is MIME type used for encrypted zstd-compressed layers.
+	MediaTypeLayerZstdEnc = "application/vnd.oci.image.layer.v1.tar+zstd+encrypted"
 	// MediaTypeLayerNonDistributableEnc is MIME type used for non distributable encrypted layers.
 	MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted"
-	// MediaTypeLayerGzipEnc is MIME type used for non distributable encrypted compressed layers.
+	// MediaTypeLayerNonDistributableGzipEnc is MIME type used for non distributable encrypted gzip-compressed layers.
 	MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted"
+	// MediaTypeLayerNonDistributableZsdtEnc is MIME type used for non distributable encrypted zstd-compressed layers.
+	MediaTypeLayerNonDistributableZsdtEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted"
 )
diff --git a/vendor/github.com/containers/ocicrypt/utils/ioutils.go b/vendor/github.com/containers/ocicrypt/utils/ioutils.go
index 078c34799f8..c6265168a1a 100644
--- a/vendor/github.com/containers/ocicrypt/utils/ioutils.go
+++ b/vendor/github.com/containers/ocicrypt/utils/ioutils.go
@@ -18,9 +18,9 @@ package utils
 import (
 	"bytes"
+	"fmt"
 	"io"
 	"os/exec"
-	"github.com/pkg/errors"
 )
 // FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns
@@ -44,13 +44,15 @@ type Runner struct{}
 // ExecuteCommand is used to execute a linux command line command and return the output of the command with an error if it exists.
 func (r Runner) Exec(cmdName string, args []string, input []byte) ([]byte, error) {
 	var out bytes.Buffer
+	var stderr bytes.Buffer
 	stdInputBuffer := bytes.NewBuffer(input)
 	cmd := exec.Command(cmdName, args...)
 	cmd.Stdin = stdInputBuffer
 	cmd.Stdout = &out
+	cmd.Stderr = &stderr
 	err := cmd.Run()
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error while running command: %s", cmdName)
+		return nil, fmt.Errorf("Error while running command: %s. 
stderr: %s: %w", cmdName, stderr.String(), err) } return out.Bytes(), nil } diff --git a/vendor/github.com/containers/ocicrypt/utils/testing.go b/vendor/github.com/containers/ocicrypt/utils/testing.go index 38633b19b8b..69bb9d12f1f 100644 --- a/vendor/github.com/containers/ocicrypt/utils/testing.go +++ b/vendor/github.com/containers/ocicrypt/utils/testing.go @@ -24,17 +24,16 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "fmt" "math/big" "time" - - "github.com/pkg/errors" ) // CreateRSAKey creates an RSA key func CreateRSAKey(bits int) (*rsa.PrivateKey, error) { key, err := rsa.GenerateKey(rand.Reader, bits) if err != nil { - return nil, errors.Wrap(err, "rsa.GenerateKey failed") + return nil, fmt.Errorf("rsa.GenerateKey failed: %w", err) } return key, nil } @@ -49,7 +48,7 @@ func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) if err != nil { - return nil, nil, errors.Wrap(err, "x509.MarshalPKIXPublicKey failed") + return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err) } privData := x509.MarshalPKCS1PrivateKey(key) @@ -69,7 +68,7 @@ func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte if len(password) > 0 { block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility if err != nil { - return nil, nil, errors.Wrap(err, "x509.EncryptPEMBlock failed") + return nil, nil, fmt.Errorf("x509.EncryptPEMBlock failed: %w", err) } } else { block = &pem.Block{ @@ -88,17 +87,17 @@ func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) { key, err := ecdsa.GenerateKey(curve, rand.Reader) if err != nil { - return nil, nil, errors.Wrapf(err, "ecdsa.GenerateKey failed") + return nil, nil, fmt.Errorf("ecdsa.GenerateKey failed: %w", err) } pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) if err != nil { - return nil, nil, errors.Wrapf(err, "x509.MarshalPKIXPublicKey failed") + return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err) } privData, err := x509.MarshalECPrivateKey(key) if err != nil { - return nil, nil, errors.Wrapf(err, "x509.MarshalECPrivateKey failed") + return nil, nil, fmt.Errorf("x509.MarshalECPrivateKey failed: %w", err) } return pubData, privData, nil @@ -108,7 +107,7 @@ func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) { func CreateTestCA() (*rsa.PrivateKey, *x509.Certificate, error) { key, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { - return nil, nil, errors.Wrap(err, "rsa.GenerateKey failed") + return nil, nil, fmt.Errorf("rsa.GenerateKey failed: %w", err) } ca := &x509.Certificate{ @@ -154,12 +153,12 @@ func certifyKey(pub interface{}, template *x509.Certificate, caKey *rsa.PrivateK certDER, err := x509.CreateCertificate(rand.Reader, template, caCert, pub, caKey) if err != nil { - return nil, errors.Wrap(err, "x509.CreateCertificate failed") + return nil, fmt.Errorf("x509.CreateCertificate failed: %w", err) } cert, err := x509.ParseCertificate(certDER) if err != nil { - return nil, errors.Wrap(err, "x509.ParseCertificate failed") + return nil, fmt.Errorf("x509.ParseCertificate failed: %w", err) } return cert, nil diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go index 07fe6d36708..c24ee3b3e72 100644 --- 
a/vendor/github.com/containers/ocicrypt/utils/utils.go +++ b/vendor/github.com/containers/ocicrypt/utils/utils.go @@ -21,12 +21,12 @@ import ( "crypto/x509" "encoding/base64" "encoding/pem" + "errors" "fmt" "strings" "github.com/containers/ocicrypt/crypto/pkcs11" - "github.com/pkg/errors" "golang.org/x/crypto/openpgp" json "gopkg.in/square/go-jose.v2" ) @@ -36,7 +36,7 @@ func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) { jwk := json.JSONWebKey{} err := jwk.UnmarshalJSON(privKey) if err != nil { - return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix) + return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err) } if jwk.IsPublic() { return nil, fmt.Errorf("%s: JWK is not a private key", prefix) @@ -49,7 +49,7 @@ func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) { jwk := json.JSONWebKey{} err := jwk.UnmarshalJSON(privKey) if err != nil { - return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix) + return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err) } if !jwk.IsPublic() { return nil, fmt.Errorf("%s: JWK is not a public key", prefix) @@ -97,11 +97,11 @@ func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{ var der []byte if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility if privKeyPassword == nil { - return nil, errors.Errorf("%s: Missing password for encrypted private key", prefix) + return nil, fmt.Errorf("%s: Missing password for encrypted private key", prefix) } der, err = x509.DecryptPEMBlock(block, privKeyPassword) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility if err != nil { - return nil, errors.Errorf("%s: Wrong password: could not decrypt private key", prefix) + return nil, fmt.Errorf("%s: Wrong password: could not decrypt private key", prefix) } } else { der = block.Bytes @@ -111,7 +111,7 @@ func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{ if err != nil { key, err = x509.ParsePKCS1PrivateKey(der) if err != nil { - return nil, errors.Wrapf(err, "%s: Could not parse private key", prefix) + return nil, fmt.Errorf("%s: Could not parse private key: %w", prefix, err) } } } else { @@ -145,7 +145,7 @@ func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) { if block != nil { key, err = x509.ParsePKIXPublicKey(block.Bytes) if err != nil { - return nil, errors.Wrapf(err, "%s: Could not parse public key", prefix) + return nil, fmt.Errorf("%s: Could not parse public key: %w", prefix, err) } } else { key, err = parseJWKPublicKey(pubKey, prefix) @@ -179,7 +179,7 @@ func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error } x509Cert, err = x509.ParseCertificate(block.Bytes) if err != nil { - return nil, errors.Wrapf(err, "%s: Could not parse x509 certificate", prefix) + return nil, fmt.Errorf("%s: Could not parse x509 certificate: %w", prefix, err) } } return x509Cert, err diff --git a/vendor/github.com/containers/podman/v4/libpod/define/annotations.go b/vendor/github.com/containers/podman/v4/libpod/define/annotations.go index 8f52799812b..50bc52571ad 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/annotations.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/annotations.go @@ -13,7 +13,7 @@ const ( // InspectResponseFalse) it will be used in the output of Inspect(). 
InspectAnnotationAutoremove = "io.podman.annotations.autoremove" // InspectAnnotationVolumesFrom is used by Inspect to identify - // containers whose volumes are are being used by this container. + // containers whose volumes are being used by this container. // It is expected to be a comma-separated list of container names and/or // IDs. // If an annotation with this key is found in the OCI spec, it will be @@ -135,6 +135,11 @@ const ( // creating a checkpoint image to specify the name of host distribution on // which the checkpoint was created. CheckpointAnnotationDistributionName = "io.podman.annotations.checkpoint.distribution.name" + + // InitContainerType is used by play kube when playing a kube yaml to specify the type + // of the init container. + InitContainerType = "io.podman.annotations.init.container.type" + // MaxKubeAnnotation is the max length of annotations allowed by Kubernetes. MaxKubeAnnotation = 63 ) diff --git a/vendor/github.com/containers/podman/v4/libpod/define/autoupdate.go b/vendor/github.com/containers/podman/v4/libpod/define/autoupdate.go new file mode 100644 index 00000000000..7c278c3c501 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/define/autoupdate.go @@ -0,0 +1,9 @@ +package define + +// AutoUpdateLabel denotes the container/pod label key to specify auto-update +// policies in container labels. +const AutoUpdateLabel = "io.containers.autoupdate" + +// AutoUpdateAuthfileLabel denotes the container label key to specify authfile +// in container labels. +const AutoUpdateAuthfileLabel = "io.containers.autoupdate.authfile" diff --git a/vendor/github.com/containers/podman/v4/libpod/define/config.go b/vendor/github.com/containers/podman/v4/libpod/define/config.go index 0181bd31ce0..7295f1425e2 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/config.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/config.go @@ -3,7 +3,6 @@ package define import ( "bufio" "io" - "regexp" "github.com/containers/common/libnetwork/types" ) @@ -20,8 +19,6 @@ var ( NameRegex = types.NameRegex // RegexError is thrown in presence of an invalid container/pod name. RegexError = types.RegexError - // UmaskRegex is a regular expression to validate Umask. - UmaskRegex = regexp.MustCompile(`^[0-7]{1,4}$`) ) const ( @@ -40,6 +37,10 @@ type InfoData struct { // itself. const VolumeDriverLocal = "local" +// VolumeDriverImage is the "image" volume driver. It is managed by Libpod and +// uses volumes backed by an image. 
+const VolumeDriverImage = "image" + const ( OCIManifestDir = "oci-dir" OCIArchive = "oci-archive" @@ -81,15 +82,8 @@ const NoLogging = "none" // PassthroughLogging is the string conmon expects when specifying to use the passthrough driver const PassthroughLogging = "passthrough" -// Strings used for --sdnotify option to podman -const ( - SdNotifyModeContainer = "container" - SdNotifyModeConmon = "conmon" - SdNotifyModeIgnore = "ignore" -) - // DefaultRlimitValue is the value set by default for nofile and nproc const RLimitDefaultValue = uint64(1048576) // BindMountPrefix distinguishes its annotations from others -const BindMountPrefix = "bind-mount-options:" +const BindMountPrefix = "bind-mount-options" diff --git a/vendor/github.com/containers/podman/v4/libpod/define/container.go b/vendor/github.com/containers/podman/v4/libpod/define/container.go index bb44a6a4a42..ba939578f58 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/container.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/container.go @@ -35,4 +35,6 @@ const ( // OneShotInitContainer is a container that only runs as init once // and is then deleted. OneShotInitContainer = "once" + // ContainerInitPath is the default path of the mounted container init. + ContainerInitPath = "/run/podman-init" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go b/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go index 6cdffb8b7a6..038d4971b3e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go @@ -55,6 +55,8 @@ type InspectContainerConfig struct { StopSignal uint `json:"StopSignal"` // Configured healthcheck for the container Healthcheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` + // HealthcheckOnFailureAction defines an action to take once the container turns unhealthy. + HealthcheckOnFailureAction string `json:"HealthcheckOnFailureAction,omitempty"` // CreateCommand is the full command plus arguments of the process the // container has been created with. CreateCommand []string `json:"CreateCommand,omitempty"` @@ -79,6 +81,10 @@ type InspectContainerConfig struct { // treated as root directories. Standard bind mounts will be mounted // into paths relative to these directories. ChrootDirs []string `json:"ChrootDirs,omitempty"` + // SdNotifyMode is the sd-notify mode of the container. + SdNotifyMode string `json:"sdNotifyMode,omitempty"` + // SdNotifySocket is the NOTIFY_SOCKET in use by/configured for the container. + SdNotifySocket string `json:"sdNotifySocket,omitempty"` } // InspectRestartPolicy holds information about the container's restart policy. @@ -259,9 +265,8 @@ type HealthCheckLog struct { // as possible from the spec and container config. // Some things cannot be inferred. These will be populated by spec annotations // (if available). -// Field names are fixed for compatibility and cannot be changed. -// As such, silence lint warnings about them. -//nolint +// +//nolint:revive,stylecheck // Field names are fixed for compatibility and cannot be changed. type InspectContainerHostConfig struct { // Binds contains an array of user-added mounts. // Both volume mounts and named volumes are included. @@ -508,8 +513,8 @@ type InspectContainerHostConfig struct { // CpuRealtimeRuntime is the length of time (in microseconds) allocated // for realtime tasks within every CpuRealtimePeriod. 
CpuRealtimeRuntime int64 `json:"CpuRealtimeRuntime"` - // CpusetCpus is the is the set of CPUs that the container will execute - // on. Formatted as `0-3` or `0,2`. Default (if unset) is all CPUs. + // CpusetCpus is the set of CPUs that the container will execute on. + // Formatted as `0-3` or `0,2`. Default (if unset) is all CPUs. CpusetCpus string `json:"CpusetCpus"` // CpusetMems is the set of memory nodes the container will use. // Formatted as `0-3` or `0,2`. Default (if unset) is all memory nodes. @@ -546,7 +551,7 @@ type InspectContainerHostConfig struct { OomKillDisable bool `json:"OomKillDisable"` // Init indicates whether the container has an init mounted into it. Init bool `json:"Init,omitempty"` - // PidsLimit is the maximum number of PIDs what may be created within + // PidsLimit is the maximum number of PIDs that may be created within // the container. 0, the default, indicates no limit. PidsLimit int64 `json:"PidsLimit"` // Ulimits is a set of ulimits that will be set within the container. @@ -601,7 +606,7 @@ type InspectBasicNetworkConfig struct { AdditionalMacAddresses []string `json:"AdditionalMACAddresses,omitempty"` } -// InspectAdditionalNetwork holds information about non-default CNI networks the +// InspectAdditionalNetwork holds information about non-default networks the // container has been connected to. // As with InspectNetworkSettings, many fields are unused and maintained only // for compatibility with Docker. @@ -637,7 +642,7 @@ type InspectNetworkSettings struct { LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen"` Ports map[string][]InspectHostPort `json:"Ports"` SandboxKey string `json:"SandboxKey"` - // Networks contains information on non-default CNI networks this + // Networks contains information on non-default networks this // container has joined. // It is a map of network name to network information. 
Networks map[string]*InspectAdditionalNetwork `json:"Networks,omitempty"` @@ -655,6 +660,7 @@ type InspectContainerData struct { Args []string `json:"Args"` State *InspectContainerState `json:"State"` Image string `json:"Image"` + ImageDigest string `json:"ImageDigest"` ImageName string `json:"ImageName"` Rootfs string `json:"Rootfs"` Pod string `json:"Pod"` @@ -683,6 +689,7 @@ type InspectContainerData struct { NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"` Namespace string `json:"Namespace"` IsInfra bool `json:"IsInfra"` + IsService bool `json:"IsService"` Config *InspectContainerConfig `json:"Config"` HostConfig *InspectContainerHostConfig `json:"HostConfig"` } diff --git a/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go b/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go index 9ad3aec08ed..56890339e8b 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go @@ -1,9 +1,8 @@ package define import ( + "fmt" "time" - - "github.com/pkg/errors" ) // ContainerStatus represents the current state of a container @@ -26,7 +25,7 @@ const ( ContainerStateStopped ContainerStatus = iota // ContainerStatePaused indicates that the container has been paused ContainerStatePaused ContainerStatus = iota - // ContainerStateExited indicates the the container has stopped and been + // ContainerStateExited indicates the container has stopped and been // cleaned up ContainerStateExited ContainerStatus = iota // ContainerStateRemoving indicates the container is in the process of @@ -91,7 +90,7 @@ func StringToContainerStatus(status string) (ContainerStatus, error) { case ContainerStateRemoving.String(): return ContainerStateRemoving, nil default: - return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status) + return ContainerStateUnknown, fmt.Errorf("unknown container state: %s: %w", status, ErrInvalidArg) } } diff --git a/vendor/github.com/containers/podman/v4/libpod/define/errors.go b/vendor/github.com/containers/podman/v4/libpod/define/errors.go index f5a7c73e5dd..be471c27e36 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/errors.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/errors.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/util" ) var ( @@ -24,6 +25,10 @@ var ( // not exist. ErrNoSuchExecSession = errors.New("no such exec session") + // ErrNoSuchExitCode indicates that the requested container exit code + // does not exist. + ErrNoSuchExitCode = errors.New("no such exit code") + // ErrDepExists indicates that the current object has dependencies and // cannot be removed before them. ErrDepExists = errors.New("dependency exists") @@ -88,7 +93,7 @@ var ( // ErrDetach indicates that an attach session was manually detached by // the user. - ErrDetach = errors.New("detached from container") + ErrDetach = util.ErrDetach // ErrWillDeadlock indicates that the requested operation will cause a // deadlock. 
This is usually caused by upgrade issues, and is resolved @@ -174,6 +179,9 @@ var ( // ErrNetworkInUse indicates the requested operation failed because the network was in use ErrNetworkInUse = errors.New("network is being used") + // ErrNetworkConnected indicates that the required operation failed because the container is already a network endpoint + ErrNetworkConnected = errors.New("network is already connected") + // ErrStoreNotInitialized indicates that the container storage was never // initialized. ErrStoreNotInitialized = errors.New("the container storage was never initialized") @@ -200,7 +208,7 @@ var ( // Useful for potentially long running tasks. ErrCanceled = errors.New("cancelled by user") - // ErrConmonVersionFormat is used when the expected versio-format of conmon + // ErrConmonVersionFormat is used when the expected version format of conmon // has changed. ErrConmonVersionFormat = "conmon version changed format" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go b/vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go index f94616b3327..a84730e7226 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go @@ -1,9 +1,9 @@ package define import ( + "errors" "strings" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -11,8 +11,8 @@ const ( // ExecErrorCodeGeneric is the default error code to return from an exec session if libpod failed // prior to calling the runtime ExecErrorCodeGeneric = 125 - // ExecErrorCodeCannotInvoke is the error code to return when the runtime fails to invoke a command - // an example of this can be found by trying to execute a directory: + // ExecErrorCodeCannotInvoke is the error code to return when the runtime fails to invoke a command. + // An example of this can be found by trying to execute a directory: // `podman exec -l /etc` ExecErrorCodeCannotInvoke = 126 // ExecErrorCodeNotFound is the error code to return when a command cannot be found @@ -23,10 +23,10 @@ const ( // has a predefined exit code associated. If so, it returns that, otherwise it returns // the exit code originally stated in libpod.Exec() func TranslateExecErrorToExitCode(originalEC int, err error) int { - if errors.Cause(err) == ErrOCIRuntimePermissionDenied { + if errors.Is(err, ErrOCIRuntimePermissionDenied) { return ExecErrorCodeCannotInvoke } - if errors.Cause(err) == ErrOCIRuntimeNotFound { + if errors.Is(err, ErrOCIRuntimeNotFound) { return ExecErrorCodeNotFound } return originalEC diff --git a/vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go b/vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go index bde449d3077..15ea79fc20c 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go @@ -1,5 +1,12 @@ package define +import ( + "fmt" + "strings" + + "github.com/containers/image/v5/manifest" +) + const ( // HealthCheckHealthy describes a healthy container HealthCheckHealthy string = "healthy" @@ -33,6 +40,9 @@ const ( HealthCheckInternalError HealthCheckStatus = iota // HealthCheckDefined means the healthcheck was found on the container HealthCheckDefined HealthCheckStatus = iota + // HealthCheckStartup means the healthcheck was unhealthy, but is still + // either within the startup HC or the startup period of the healthcheck + HealthCheckStartup HealthCheckStatus = iota ) // Healthcheck defaults. 
These are used both in the cli as well in
@@ -47,3 +57,91 @@ const (
 	// DefaultHealthCheckTimeout default value
 	DefaultHealthCheckTimeout = "30s"
 )
+
+// HealthConfig.Test options
+const (
+	// HealthConfigTestNone disables healthcheck
+	HealthConfigTestNone = "NONE"
+	// HealthConfigTestCmd execs arguments directly
+	HealthConfigTestCmd = "CMD"
+	// HealthConfigTestCmdShell runs commands with the system's default shell
+	HealthConfigTestCmdShell = "CMD-SHELL"
+)
+
+// HealthCheckOnFailureAction defines how Podman reacts when a container's health
+// status turns unhealthy.
+type HealthCheckOnFailureAction int
+
+// Healthcheck on-failure actions.
+const (
+	// HealthCheckOnFailureActionNone instructs Podman to not react to an unhealthy status.
+	HealthCheckOnFailureActionNone = iota // Must be first iota for backwards compatibility
+	// HealthCheckOnFailureActionInvalid denotes an invalid on-failure policy.
+	HealthCheckOnFailureActionInvalid = iota
+	// HealthCheckOnFailureActionKill instructs Podman to kill the container on an unhealthy status.
+	HealthCheckOnFailureActionKill = iota
+	// HealthCheckOnFailureActionRestart instructs Podman to restart the container on an unhealthy status.
+	HealthCheckOnFailureActionRestart = iota
+	// HealthCheckOnFailureActionStop instructs Podman to stop the container on an unhealthy status.
+	HealthCheckOnFailureActionStop = iota
+)
+
+// String representations for on-failure actions.
+const (
+	strHealthCheckOnFailureActionNone = "none"
+	strHealthCheckOnFailureActionInvalid = "invalid"
+	strHealthCheckOnFailureActionKill = "kill"
+	strHealthCheckOnFailureActionRestart = "restart"
+	strHealthCheckOnFailureActionStop = "stop"
+)
+
+// SupportedHealthCheckOnFailureActions lists all supported healthcheck on-failure actions.
+var SupportedHealthCheckOnFailureActions = []string{
+	strHealthCheckOnFailureActionNone,
+	strHealthCheckOnFailureActionKill,
+	strHealthCheckOnFailureActionRestart,
+	strHealthCheckOnFailureActionStop,
+}
+
+// String returns the string representation of the HealthCheckOnFailureAction.
+func (h HealthCheckOnFailureAction) String() string {
+	switch h {
+	case HealthCheckOnFailureActionNone:
+		return strHealthCheckOnFailureActionNone
+	case HealthCheckOnFailureActionKill:
+		return strHealthCheckOnFailureActionKill
+	case HealthCheckOnFailureActionRestart:
+		return strHealthCheckOnFailureActionRestart
+	case HealthCheckOnFailureActionStop:
+		return strHealthCheckOnFailureActionStop
+	default:
+		return strHealthCheckOnFailureActionInvalid
+	}
+}
+
+// ParseHealthCheckOnFailureAction parses the specified string into a HealthCheckOnFailureAction.
+// An error is returned for an invalid input.
+func ParseHealthCheckOnFailureAction(s string) (HealthCheckOnFailureAction, error) {
+	switch s {
+	case "", strHealthCheckOnFailureActionNone:
+		return HealthCheckOnFailureActionNone, nil
+	case strHealthCheckOnFailureActionKill:
+		return HealthCheckOnFailureActionKill, nil
+	case strHealthCheckOnFailureActionRestart:
+		return HealthCheckOnFailureActionRestart, nil
+	case strHealthCheckOnFailureActionStop:
+		return HealthCheckOnFailureActionStop, nil
+	default:
+		err := fmt.Errorf("invalid on-failure action %q for health check: supported actions are %s", s, strings.Join(SupportedHealthCheckOnFailureActions, ","))
+		return HealthCheckOnFailureActionInvalid, err
+	}
+}
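// A short usage sketch of the on-failure API added above (illustrative only,
// not part of the vendored file). ParseHealthCheckOnFailureAction accepts the
// empty string as "none" for backwards compatibility, and String() falls back
// to "invalid" rather than failing on unknown values; assuming the caller
// imports this package as "define":
//
//	action, err := define.ParseHealthCheckOnFailureAction("restart")
//	if err != nil {
//		return err // the error names the input and lists the supported actions
//	}
//	fmt.Println(action == define.HealthCheckOnFailureActionRestart) // true
//	fmt.Println(action)                                             // "restart"
+
+// StartupHealthCheck is the configuration of a startup healthcheck.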
+type StartupHealthCheck struct { + manifest.Schema2HealthConfig + // Successes are the number of successes required to mark the startup HC + // as passed. + // If set to 0, a single success will mark the HC as passed. + Successes int `json:",omitempty"` +} diff --git a/vendor/github.com/containers/podman/v4/libpod/define/info.go b/vendor/github.com/containers/podman/v4/libpod/define/info.go index 911fa5c03c7..28260918b55 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/info.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/info.go @@ -14,7 +14,7 @@ type Info struct { Version Version `json:"version"` } -// HostInfo describes the libpod host +// SecurityInfo describes the libpod host type SecurityInfo struct { AppArmorEnabled bool `json:"apparmorEnabled"` DefaultCapabilities string `json:"capabilities"` @@ -64,8 +64,7 @@ type RemoteSocket struct { Exists bool `json:"exists,omitempty"` } -// SlirpInfo describes the slirp executable that -// is being being used. +// SlirpInfo describes the slirp executable that is being used type SlirpInfo struct { Executable string `json:"executable"` Package string `json:"package"` @@ -78,8 +77,7 @@ type IDMappings struct { UIDMap []idtools.IDMap `json:"uidmap"` } -// DistributionInfo describes the host distribution -// for libpod +// DistributionInfo describes the host distribution for libpod type DistributionInfo struct { Distribution string `json:"distribution"` Variant string `json:"variant,omitempty"` @@ -120,6 +118,7 @@ type StoreInfo struct { ImageStore ImageStore `json:"imageStore"` RunRoot string `json:"runRoot"` VolumePath string `json:"volumePath"` + TransientStore bool `json:"transientStore"` } // ImageStore describes the image store. Right now only the number @@ -141,8 +140,8 @@ type Plugins struct { Volume []string `json:"volume"` Network []string `json:"network"` Log []string `json:"log"` - // FIXME what should we do with Authorization, docker seems to return nothing by default - // Authorization []string `json:"authorization"` + // Authorization is provided for compatibility, will always be nil as Podman has no daemon + Authorization []string `json:"authorization"` } type CPUUsage struct { diff --git a/vendor/github.com/containers/podman/v4/libpod/define/mount.go b/vendor/github.com/containers/podman/v4/libpod/define/mount.go index 1b0d019c831..db444fd8340 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/mount.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/mount.go @@ -1,8 +1,6 @@ package define const ( - // TypeBind is the type for mounting host dir - TypeBind = "bind" // TypeVolume is the type for named volumes TypeVolume = "volume" // TypeTmpfs is the type for mounting tmpfs diff --git a/vendor/github.com/containers/podman/v4/libpod/define/mount_freebsd.go b/vendor/github.com/containers/podman/v4/libpod/define/mount_freebsd.go new file mode 100644 index 00000000000..e080c9ec6c8 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/define/mount_freebsd.go @@ -0,0 +1,8 @@ +//go:build freebsd + +package define + +const ( + // TypeBind is the type for mounting host dir + TypeBind = "nullfs" +) diff --git a/vendor/github.com/containers/podman/v4/libpod/define/mount_linux.go b/vendor/github.com/containers/podman/v4/libpod/define/mount_linux.go new file mode 100644 index 00000000000..5ef84890523 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/define/mount_linux.go @@ -0,0 +1,8 @@ +//go:build linux + +package define + +const ( + // TypeBind is the type for 
mounting host dir
+	TypeBind = "bind"
+)
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/mount_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/define/mount_unsupported.go
new file mode 100644
index 00000000000..cb8642fe24b
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/mount_unsupported.go
@@ -0,0 +1,8 @@
+//go:build !linux && !freebsd
+
+package define
+
+const (
+	// TypeBind is the type for mounting host dir
+	TypeBind = "bind"
+)
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go b/vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go
index c5ea3a3c086..dc82af2201b 100644
--- a/vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go
+++ b/vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go
@@ -19,6 +19,8 @@ type InspectPodData struct {
 	// CreateCommand is the full command plus arguments of the process the
 	// container has been created with.
 	CreateCommand []string `json:"CreateCommand,omitempty"`
+	// ExitPolicy of the pod.
+	ExitPolicy string `json:"ExitPolicy,omitempty"`
 	// State represents the current state of the pod.
 	State string `json:"State"`
 	// Hostname is the hostname that the pod will set.
@@ -55,18 +57,32 @@ type InspectPodData struct {
 	CPUPeriod uint64 `json:"cpu_period,omitempty"`
 	// CPUQuota contains the CPU quota of the pod
 	CPUQuota int64 `json:"cpu_quota,omitempty"`
+	// CPUShares contains the cpu shares for the pod
+	CPUShares uint64 `json:"cpu_shares,omitempty"`
 	// CPUSetCPUs contains linux specific CPU data for the pod
 	CPUSetCPUs string `json:"cpuset_cpus,omitempty"`
+	// CPUSetMems contains linux specific memory node data for the pod
+	CPUSetMems string `json:"cpuset_mems,omitempty"`
 	// Mounts contains volume related information for the pod
 	Mounts []InspectMount `json:"mounts,omitempty"`
 	// Devices contains the specified host devices
 	Devices []InspectDevice `json:"devices,omitempty"`
 	// BlkioDeviceReadBps contains the Read/Access limit for the pod's devices
 	BlkioDeviceReadBps []InspectBlkioThrottleDevice `json:"device_read_bps,omitempty"`
+	// BlkioDeviceWriteBps contains the Write limit for the pod's devices
+	BlkioDeviceWriteBps []InspectBlkioThrottleDevice `json:"device_write_bps,omitempty"`
 	// VolumesFrom contains the containers that the pod inherits mounts from
 	VolumesFrom []string `json:"volumes_from,omitempty"`
 	// SecurityOpt contains the specified security labels and related SELinux information
 	SecurityOpts []string `json:"security_opt,omitempty"`
+	// MemoryLimit contains the specified cgroup memory limit for the pod
+	MemoryLimit uint64 `json:"memory_limit,omitempty"`
+	// MemorySwap contains the specified memory swap limit for the pod
+	MemorySwap uint64 `json:"memory_swap,omitempty"`
+	// BlkioWeight contains the blkio weight limit for the pod
+	BlkioWeight uint64 `json:"blkio_weight,omitempty"`
+	// BlkioWeightDevice contains the blkio weight device limits for the pod
+	BlkioWeightDevice []InspectBlkioWeightDevice `json:"blkio_weight_device,omitempty"`
 }
 // InspectPodInfraConfig contains the configuration of the pod's infra
@@ -104,7 +120,7 @@ type InspectPodInfraConfig struct {
 	// HostAdd adds a number of hosts to the infra container's resolv.conf
 	// which will be shared with the rest of the pod.
 	HostAdd []string
-	// Networks is a list of CNI networks the pod will join.
+	// Networks is a list of networks the pod will join.
Networks []string // NetworkOptions are additional options for each network NetworkOptions map[string][]string @@ -118,6 +134,8 @@ type InspectPodInfraConfig struct { PidNS string `json:"pid_ns,omitempty"` // UserNS is the usernamespace that all the containers in the pod will join. UserNS string `json:"userns,omitempty"` + // UtsNS is the uts namespace that all containers in the pod will join + UtsNS string `json:"uts_ns,omitempty"` } // InspectPodContainerInfo contains information on a container in a pod. diff --git a/vendor/github.com/containers/podman/v4/libpod/define/sdnotify.go b/vendor/github.com/containers/podman/v4/libpod/define/sdnotify.go new file mode 100644 index 00000000000..1d548c764ed --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/define/sdnotify.go @@ -0,0 +1,20 @@ +package define + +import "fmt" + +// Strings used for --sdnotify option to podman +const ( + SdNotifyModeContainer = "container" + SdNotifyModeConmon = "conmon" + SdNotifyModeIgnore = "ignore" +) + +// ValidateSdNotifyMode validates the specified mode. +func ValidateSdNotifyMode(mode string) error { + switch mode { + case "", SdNotifyModeContainer, SdNotifyModeConmon, SdNotifyModeIgnore: + return nil + default: + return fmt.Errorf("%w: invalid sdnotify value %q: must be %s, %s or %s", ErrInvalidArg, mode, SdNotifyModeContainer, SdNotifyModeConmon, SdNotifyModeIgnore) + } +} diff --git a/vendor/github.com/containers/podman/v4/libpod/define/terminal.go b/vendor/github.com/containers/podman/v4/libpod/define/terminal.go deleted file mode 100644 index ce895554491..00000000000 --- a/vendor/github.com/containers/podman/v4/libpod/define/terminal.go +++ /dev/null @@ -1,7 +0,0 @@ -package define - -// TerminalSize represents the width and height of a terminal. -type TerminalSize struct { - Width uint16 - Height uint16 -} diff --git a/vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go b/vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go index fac1791763a..4d6f12080ae 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go @@ -45,7 +45,7 @@ type InspectVolumeData struct { // GID is the GID that the volume was created with. GID int `json:"GID,omitempty"` // Anonymous indicates that the volume was created as an anonymous - // volume for a specific container, and will be be removed when any + // volume for a specific container, and will be removed when any // container using it is removed. Anonymous bool `json:"Anonymous,omitempty"` // MountCount is the number of times this volume has been mounted. @@ -56,4 +56,15 @@ type InspectVolumeData struct { // a container, the container will chown the volume to the container process // UID/GID. NeedsChown bool `json:"NeedsChown,omitempty"` + // Timeout is the specified driver timeout if given + Timeout uint `json:"Timeout,omitempty"` + // StorageID is the ID of the container backing the volume in c/storage. + // Only used with Image Volumes. 
+ StorageID string `json:"StorageID,omitempty"` +} + +type VolumeReload struct { + Added []string + Removed []string + Errors []error } diff --git a/vendor/github.com/containers/podman/v4/libpod/events/config.go b/vendor/github.com/containers/podman/v4/libpod/events/config.go index 00cdca00705..058b219a78e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/config.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/config.go @@ -2,9 +2,8 @@ package events import ( "context" + "errors" "time" - - "github.com/pkg/errors" ) // EventerType ... @@ -40,6 +39,8 @@ type Event struct { Time time.Time // Type of event that occurred Type Type + // Health status of the current container + HealthStatus string `json:"health_status,omitempty"` Details } @@ -49,6 +50,12 @@ type Event struct { type Details struct { // ID is the event ID ID string + // ContainerInspectData includes the payload of the container's inspect + // data. Only set when events_container_create_inspect_data is set true + // in containers.conf. + ContainerInspectData string `json:",omitempty"` + // PodID is the ID of the pod associated with the container. + PodID string `json:",omitempty"` // Attributes can be used to describe specifics about the event // in the case of a container event, labels for example Attributes map[string]string @@ -98,6 +105,8 @@ type Type string // Status describes the actual event action (stop, start, create, kill) type Status string +// When updating this list below please also update the shell completion list in +// cmd/podman/common/completion.go and the StringToXXX function in events.go. const ( // Container - event is related to containers Container Type = "container" @@ -139,6 +148,8 @@ const ( Exited Status = "died" // Export ... Export Status = "export" + // HealthStatus ... + HealthStatus Status = "health_status" // History ... History Status = "history" // Import ... 
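To make the new event payload concrete, here is a minimal, standalone sketch with the Event and Details types trimmed to the fields touched above (the real libpod definitions carry more fields, so this is illustrative only). It shows that the added omitempty fields stay off the wire when unset, so consumers of the event stream see unchanged JSON for events that predate them:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Trimmed mirrors of the libpod event types above; illustrative only.
type Details struct {
	ID                   string
	ContainerInspectData string `json:",omitempty"`
	PodID                string `json:",omitempty"`
}

type Event struct {
	Name         string
	Status       string
	Time         time.Time
	Type         string
	HealthStatus string `json:"health_status,omitempty"`
	Details
}

func main() {
	e := Event{
		Name:         "web",
		Status:       "health_status", // the new Status constant added above
		Time:         time.Now(),
		Type:         "container",
		HealthStatus: "unhealthy",
		Details:      Details{ID: "abc123", PodID: "def456"},
	}
	b, _ := json.Marshal(e)
	fmt.Println(string(b))
	// Events without a pod or health state omit PodID and health_status
	// entirely, so existing consumers keep seeing the pre-existing shape.
}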
diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events.go b/vendor/github.com/containers/podman/v4/libpod/events/events.go index e83c2efeeef..2105a3b89fd 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/events.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/events.go @@ -2,16 +2,16 @@ package events import ( "encoding/json" + "errors" "fmt" "time" "github.com/containers/storage/pkg/stringid" - "github.com/pkg/errors" ) // ErrNoJournaldLogging indicates that there is no journald logging // supported (requires libsystemd) -var ErrNoJournaldLogging = errors.New("No support for journald logging") +var ErrNoJournaldLogging = errors.New("no support for journald logging") // String returns a string representation of EventerType func (et EventerType) String() string { @@ -77,6 +77,12 @@ func (e *Event) ToHumanReadable(truncate bool) string { switch e.Type { case Container, Pod: humanFormat = fmt.Sprintf("%s %s %s %s (image=%s, name=%s", e.Time, e.Type, e.Status, id, e.Image, e.Name) + if e.PodID != "" { + humanFormat += fmt.Sprintf(", pod_id=%s", e.PodID) + } + if e.HealthStatus != "" { + humanFormat += fmt.Sprintf(", health_status=%s", e.HealthStatus) + } // check if the container has labels and add it to the output if len(e.Attributes) > 0 { for k, v := range e.Attributes { @@ -140,12 +146,10 @@ func StringToType(name string) (Type, error) { case "": return "", ErrEventTypeBlank } - return "", errors.Errorf("unknown event type %q", name) + return "", fmt.Errorf("unknown event type %q", name) } // StringToStatus converts a string to an Event Status -// TODO if we add more events, we might consider a go-generator to -// create the switch statement func StringToStatus(name string) (Status, error) { switch name { case Attach.String(): @@ -170,6 +174,8 @@ func StringToStatus(name string) (Status, error) { return Exited, nil case Export.String(): return Export, nil + case HealthStatus.String(): + return HealthStatus, nil case History.String(): return History, nil case Import.String(): @@ -225,5 +231,5 @@ func StringToStatus(name string) (Status, error) { case Untag.String(): return Untag, nil } - return "", errors.Errorf("unknown event status %q", name) + return "", fmt.Errorf("unknown event status %q", name) } diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events_freebsd.go b/vendor/github.com/containers/podman/v4/libpod/events/events_freebsd.go new file mode 100644 index 00000000000..90933fa2cc5 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/events_freebsd.go @@ -0,0 +1,23 @@ +package events + +import ( + "fmt" + "strings" + + "github.com/sirupsen/logrus" +) + +// NewEventer creates an eventer based on the eventer type +func NewEventer(options EventerOptions) (Eventer, error) { + logrus.Debugf("Initializing event backend %s", options.EventerType) + switch strings.ToUpper(options.EventerType) { + case strings.ToUpper(LogFile.String()): + return EventLogFile{options}, nil + case strings.ToUpper(Null.String()): + return newNullEventer(), nil + case strings.ToUpper(Memory.String()): + return NewMemoryEventer(), nil + default: + return nil, fmt.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType)) + } +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events_linux.go b/vendor/github.com/containers/podman/v4/libpod/events/events_linux.go index 4320f219018..66b125dd5fa 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/events_linux.go +++ 
b/vendor/github.com/containers/podman/v4/libpod/events/events_linux.go @@ -1,9 +1,9 @@ package events import ( + "fmt" "strings" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -14,16 +14,16 @@ func NewEventer(options EventerOptions) (Eventer, error) { case strings.ToUpper(Journald.String()): eventer, err := newEventJournalD(options) if err != nil { - return nil, errors.Wrapf(err, "eventer creation") + return nil, fmt.Errorf("eventer creation: %w", err) } return eventer, nil case strings.ToUpper(LogFile.String()): - return EventLogFile{options}, nil + return newLogFileEventer(options) case strings.ToUpper(Null.String()): - return NewNullEventer(), nil + return newNullEventer(), nil case strings.ToUpper(Memory.String()): return NewMemoryEventer(), nil default: - return nil, errors.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType)) + return nil, fmt.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType)) } } diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go index 25c17552419..01031c22504 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go @@ -1,9 +1,9 @@ -//go:build !linux -// +build !linux +//go:build !linux && !freebsd +// +build !linux,!freebsd package events -import "github.com/pkg/errors" +import "errors" // NewEventer creates an eventer based on the eventer type func NewEventer(options EventerOptions) (Eventer, error) { diff --git a/vendor/github.com/containers/podman/v4/libpod/events/filters.go b/vendor/github.com/containers/podman/v4/libpod/events/filters.go index 64c162db2f6..9057c9b79c2 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/filters.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/filters.go @@ -1,11 +1,11 @@ package events import ( + "fmt" "strings" "time" "github.com/containers/podman/v4/pkg/util" - "github.com/pkg/errors" ) func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error) { @@ -21,6 +21,9 @@ func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error return strings.HasPrefix(e.ID, filterValue) }, nil case "EVENT", "STATUS": + if filterValue == "die" { // Docker compat + filterValue = "died" + } return func(e *Event) bool { return string(e.Status) == filterValue }, nil @@ -74,7 +77,7 @@ func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error return found }, nil } - return nil, errors.Errorf("%s is an invalid filter", filter) + return nil, fmt.Errorf("%s is an invalid filter", filter) } func generateEventSinceOption(timeSince time.Time) func(e *Event) bool { @@ -92,7 +95,7 @@ func generateEventUntilOption(timeUntil time.Time) func(e *Event) bool { func parseFilter(filter string) (string, string, error) { filterSplit := strings.SplitN(filter, "=", 2) if len(filterSplit) != 2 { - return "", "", errors.Errorf("%s is an invalid filter", filter) + return "", "", fmt.Errorf("%s is an invalid filter", filter) } return filterSplit[0], filterSplit[1], nil } @@ -137,7 +140,7 @@ func generateEventFilters(filters []string, since, until string) (map[string][]E if len(since) > 0 { timeSince, err := util.ParseInputTime(since, true) if err != nil { - return nil, errors.Wrapf(err, "unable to convert since time of %s", since) + return nil, fmt.Errorf("unable to convert since time of %s: %w", 
since, err) } filterFunc := generateEventSinceOption(timeSince) filterMap["since"] = []EventFilter{filterFunc} @@ -146,7 +149,7 @@ func generateEventFilters(filters []string, since, until string) (map[string][]E if len(until) > 0 { timeUntil, err := util.ParseInputTime(until, false) if err != nil { - return nil, errors.Wrapf(err, "unable to convert until time of %s", until) + return nil, fmt.Errorf("unable to convert until time of %s: %w", until, err) } filterFunc := generateEventUntilOption(timeUntil) filterMap["until"] = []EventFilter{filterFunc} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go b/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go index 866042a4c3d..0f472b8d899 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go @@ -6,13 +6,15 @@ package events import ( "context" "encoding/json" + "errors" + "fmt" "strconv" "time" + "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/podman/v4/pkg/util" "github.com/coreos/go-systemd/v22/journal" "github.com/coreos/go-systemd/v22/sdjournal" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -49,6 +51,9 @@ func (e EventJournalD) Write(ee Event) error { if ee.ContainerExitCode != 0 { m["PODMAN_EXIT_CODE"] = strconv.Itoa(ee.ContainerExitCode) } + if ee.PodID != "" { + m["PODMAN_POD_ID"] = ee.PodID + } // If we have container labels, we need to convert them to a string so they // can be recorded with the event if len(ee.Details.Attributes) > 0 { @@ -58,13 +63,18 @@ func (e EventJournalD) Write(ee Event) error { } m["PODMAN_LABELS"] = string(b) } + m["PODMAN_HEALTH_STATUS"] = ee.HealthStatus + + if len(ee.Details.ContainerInspectData) > 0 { + m["PODMAN_CONTAINER_INSPECT_DATA"] = ee.Details.ContainerInspectData + } case Network: m["PODMAN_ID"] = ee.ID m["PODMAN_NETWORK_NAME"] = ee.Network case Volume: m["PODMAN_NAME"] = ee.Name } - return journal.Send(string(ee.ToHumanReadable(false)), journal.PriInfo, m) + return journal.Send(ee.ToHumanReadable(false), journal.PriInfo, m) } // Read reads events from the journal and sends qualified events to the event channel @@ -72,7 +82,7 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error { defer close(options.EventChannel) filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until) if err != nil { - return errors.Wrapf(err, "failed to parse event filters") + return fmt.Errorf("failed to parse event filters: %w", err) } var untilTime time.Time @@ -92,72 +102,59 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error { logrus.Errorf("Unable to close journal :%v", err) } }() + err = j.SetDataThreshold(0) + if err != nil { + logrus.Warnf("cannot set data threshold: %v", err) + } // match only podman journal entries podmanJournal := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"} if err := j.AddMatch(podmanJournal.String()); err != nil { - return errors.Wrap(err, "failed to add journal filter for event log") + return fmt.Errorf("failed to add SYSLOG_IDENTIFIER journal filter for event log: %w", err) + } + + // make sure we only read events for the current user + uidMatch := sdjournal.Match{Field: "_UID", Value: strconv.Itoa(rootless.GetRootlessUID())} + if err := j.AddMatch(uidMatch.String()); err != nil { + return fmt.Errorf("failed to add _UID journal filter for event log: %w", err) } if len(options.Since) == 0 && len(options.Until) == 0 && 
options.Stream { if err := j.SeekTail(); err != nil { - return errors.Wrap(err, "failed to seek end of journal") + return fmt.Errorf("failed to seek end of journal: %w", err) } // After SeekTail calling Next moves to a random entry. // To prevent this we have to call Previous first. // see: https://bugs.freedesktop.org/show_bug.cgi?id=64614 if _, err := j.Previous(); err != nil { - return errors.Wrap(err, "failed to move journal cursor to previous entry") + return fmt.Errorf("failed to move journal cursor to previous entry: %w", err) + } + } else if len(options.Since) > 0 { + since, err := util.ParseInputTime(options.Since, true) + if err != nil { + return err + } + // seek based on time which helps to reduce unnecessary event reads + if err := j.SeekRealtimeUsec(uint64(since.UnixMicro())); err != nil { + return err } } - // the api requires a next|prev before getting a cursor - if _, err := j.Next(); err != nil { - return errors.Wrap(err, "failed to move journal cursor to next entry") - } - - prevCursor, err := j.GetCursor() - if err != nil { - return errors.Wrap(err, "failed to get journal cursor") - } for { - select { - case <-ctx.Done(): - // the consumer has cancelled - return nil - default: - // fallthrough - } - - if _, err := j.Next(); err != nil { - return errors.Wrap(err, "failed to move journal cursor to next entry") - } - newCursor, err := j.GetCursor() + entry, err := GetNextEntry(ctx, j, options.Stream, untilTime) if err != nil { - return errors.Wrap(err, "failed to get journal cursor") + return err } - if prevCursor == newCursor { - if !options.Stream || (len(options.Until) > 0 && time.Now().After(untilTime)) { - break - } - t := sdjournal.IndefiniteWait - if len(options.Until) > 0 { - t = time.Until(untilTime) - } - _ = j.Wait(t) - continue + // no entry == we hit the end + if entry == nil { + return nil } - prevCursor = newCursor - entry, err := j.GetEntry() - if err != nil { - return errors.Wrap(err, "failed to read journal entry") - } newEvent, err := newEventFromJournalEntry(entry) if err != nil { // We can't decode this event. // Don't fail hard - that would make events unusable. // Instead, log and continue. 
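
The hunk above replaces the old walk-the-whole-journal startup with a time-based seek: util.ParseInputTime turns --since into a time.Time, and SeekRealtimeUsec jumps the cursor close to that timestamp so older entries are never decoded at all. A minimal sketch of the same pattern against go-systemd's sdjournal, using only its public API (readSince is an illustrative helper, not part of this diff):

package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-systemd/v22/sdjournal"
)

// readSince prints podman-tagged journal entries newer than the given time.
func readSince(since time.Time) error {
	j, err := sdjournal.NewJournal()
	if err != nil {
		return err
	}
	defer j.Close()

	// Same match the eventer installs: only entries logged by podman.
	m := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
	if err := j.AddMatch(m.String()); err != nil {
		return err
	}
	// Jump close to the requested timestamp instead of walking from the head.
	if err := j.SeekRealtimeUsec(uint64(since.UnixMicro())); err != nil {
		return err
	}
	for {
		n, err := j.Next() // 0 means end of journal, see sd_journal_next(3)
		if err != nil || n == 0 {
			return err
		}
		entry, err := j.GetEntry()
		if err != nil {
			return err
		}
		fmt.Println(entry.Fields["MESSAGE"])
	}
}

func main() {
	if err := readSince(time.Now().Add(-time.Hour)); err != nil {
		fmt.Println("read failed:", err)
	}
}

Seeking first and iterating afterwards keeps the SYSLOG_IDENTIFIER and _UID matches on the journal side, which is much cheaper than decoding every entry and filtering in Go.
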
- if errors.Cause(err) != ErrEventTypeBlank { + if !errors.Is(err, ErrEventTypeBlank) { logrus.Errorf("Unable to decode event: %v", err) } continue @@ -166,11 +163,9 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error { options.EventChannel <- newEvent } } - return nil - } -func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { //nolint +func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { newEvent := Event{} eventType, err := StringToType(entry.Fields["PODMAN_TYPE"]) if err != nil { @@ -193,6 +188,7 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { / case Container, Pod: newEvent.ID = entry.Fields["PODMAN_ID"] newEvent.Image = entry.Fields["PODMAN_IMAGE"] + newEvent.PodID = entry.Fields["PODMAN_POD_ID"] if code, ok := entry.Fields["PODMAN_EXIT_CODE"]; ok { intCode, err := strconv.Atoi(code) if err != nil { @@ -211,9 +207,11 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { / // if we have labels, add them to the event if len(labels) > 0 { - newEvent.Details = Details{Attributes: labels} + newEvent.Attributes = labels } } + newEvent.HealthStatus = entry.Fields["PODMAN_HEALTH_STATUS"] + newEvent.Details.ContainerInspectData = entry.Fields["PODMAN_CONTAINER_INSPECT_DATA"] case Network: newEvent.ID = entry.Fields["PODMAN_ID"] newEvent.Network = entry.Fields["PODMAN_NETWORK_NAME"] @@ -227,3 +225,51 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { / func (e EventJournalD) String() string { return Journald.String() } + +// GetNextEntry returns the next entry in the journal. If the end of the +// journal is reached and stream is not set, or the current time is after +// the until time, this function returns nil, nil. +func GetNextEntry(ctx context.Context, j *sdjournal.Journal, stream bool, untilTime time.Time) (*sdjournal.JournalEntry, error) { + for { + select { + case <-ctx.Done(): + // the consumer has cancelled + return nil, nil + default: + // fallthrough + } + // the api requires a next|prev before reading the event + ret, err := j.Next() + if err != nil { + return nil, fmt.Errorf("failed to move journal cursor to next entry: %w", err) + } + // ret == 0 equals EOF, see sd_journal_next(3) + if ret == 0 { + if !stream || (!untilTime.IsZero() && time.Now().After(untilTime)) { + // we hit the end and should not keep streaming + return nil, nil + } + // keep waiting for the next entry + // j.Wait() is blocking; if no more journal entries were generated, it + // would hang the goroutine forever and leak memory once the client + // has closed the connection in the meantime. + // Waiting only 5 seconds makes sure we can check at least every 5 seconds + // whether the client has closed the connection.
+ t := 5 * time.Second + if !untilTime.IsZero() { + until := time.Until(untilTime) + if until < t { + t = until + } + } + _ = j.Wait(t) + continue + } + + entry, err := j.GetEntry() + if err != nil { + return nil, fmt.Errorf("failed to read journal entry: %w", err) + } + return entry, nil + } +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/logfile.go b/vendor/github.com/containers/podman/v4/libpod/events/logfile.go index 21fdd802712..b0872bf4cfb 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/logfile.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/logfile.go @@ -1,22 +1,22 @@ -//go:build linux -// +build linux +//go:build linux || freebsd +// +build linux freebsd package events import ( "bufio" "context" + "errors" "fmt" "io" - "io/ioutil" "os" "path" + "path/filepath" "time" "github.com/containers/podman/v4/pkg/util" "github.com/containers/storage/pkg/lockfile" "github.com/nxadm/tail" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -27,10 +27,25 @@ type EventLogFile struct { options EventerOptions } +// newLogFileEventer creates a new EventLogFile eventer +func newLogFileEventer(options EventerOptions) (*EventLogFile, error) { + // Create events log dir + if err := os.MkdirAll(filepath.Dir(options.LogFilePath), 0700); err != nil { + return nil, fmt.Errorf("creating events dirs: %w", err) + } + // We have to make sure the file is created otherwise reading events will hang. + // https://github.com/containers/podman/issues/15688 + fd, err := os.OpenFile(options.LogFilePath, os.O_RDONLY|os.O_CREATE, 0700) + if err != nil { + return nil, fmt.Errorf("failed to create event log file: %w", err) + } + return &EventLogFile{options: options}, fd.Close() +} + // Writes to the log file func (e EventLogFile) Write(ee Event) error { // We need to lock events file - lock, err := lockfile.GetLockfile(e.options.LogFilePath + ".lock") + lock, err := lockfile.GetLockFile(e.options.LogFilePath + ".lock") if err != nil { return err } @@ -90,7 +105,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error { defer close(options.EventChannel) filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until) if err != nil { - return errors.Wrapf(err, "failed to parse event filters") + return fmt.Errorf("failed to parse event filters: %w", err) } t, err := e.getTail(options) if err != nil { @@ -108,23 +123,21 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error { } }() } - funcDone := make(chan bool) - copy := true - go func() { - select { - case <-funcDone: - // Do nothing - case <-ctx.Done(): - copy = false - t.Kill(errors.New("hangup by client")) - } - }() - for line := range t.Lines { + logrus.Debugf("Reading events from file %q", e.options.LogFilePath) + + var line *tail.Line + var ok bool + for { select { case <-ctx.Done(): // the consumer has cancelled + t.Kill(errors.New("hangup by client")) return nil - default: + case line, ok = <-t.Lines: + if !ok { + // channel was closed + return nil + } // fallthrough } @@ -136,14 +149,12 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error { case Image, Volume, Pod, System, Container, Network: // no-op default: - return errors.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath) + return fmt.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath) } - if copy && applyFilters(event, filterMap) { + if applyFilters(event, 
filterMap) { options.EventChannel <- event } } - funcDone <- true - return nil } // String returns a string representation of the logger @@ -192,11 +203,11 @@ func truncate(filePath string) error { size := origFinfo.Size() threshold := size / 2 - tmp, err := ioutil.TempFile(path.Dir(filePath), "") + tmp, err := os.CreateTemp(path.Dir(filePath), "") if err != nil { // Retry in /tmp in case creating a tmp file in the same // directory has failed. - tmp, err = ioutil.TempFile("", "") + tmp, err = os.CreateTemp("", "") if err != nil { return err } diff --git a/vendor/github.com/containers/podman/v4/libpod/events/nullout.go b/vendor/github.com/containers/podman/v4/libpod/events/nullout.go index 3eca9e8dba3..da3820c23c2 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/nullout.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/nullout.go @@ -2,10 +2,11 @@ package events import ( "context" + "errors" ) -// EventToNull is an eventer type that only performs write operations -// and only writes to /dev/null. It is meant for unittests only +// EventToNull is an eventer type that does nothing. +// It is meant for unittests only type EventToNull struct{} // Write eats the event and always returns nil @@ -13,14 +14,14 @@ func (e EventToNull) Write(ee Event) error { return nil } -// Read does nothing. Do not use it. +// Read does nothing and returns an error. func (e EventToNull) Read(ctx context.Context, options ReadOptions) error { - return nil + return errors.New("cannot read events with the \"none\" backend") } -// NewNullEventer returns a new null eventer. You should only do this for -// the purposes on internal libpod testing. -func NewNullEventer() Eventer { +// newNullEventer returns a new null eventer. You should only do this for +// the purposes of internal libpod testing. 
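
EventLogFile.Write above serializes writers through a sidecar .lock file; note the GetLockfile to GetLockFile rename that comes with the containers/storage bump to v1.45.3. A rough sketch of that guard, where a plain line append stands in for the real event serialization (appendLine is an illustrative name):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/lockfile"
)

// appendLine appends one line to path while holding the sidecar lock,
// mirroring how EventLogFile.Write guards against interleaved writers.
func appendLine(path, line string) error {
	lock, err := lockfile.GetLockFile(path + ".lock")
	if err != nil {
		return err
	}
	lock.Lock()
	defer lock.Unlock()

	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o600)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintln(f, line)
	return err
}

func main() {
	if err := appendLine("/tmp/events.log", `{"Type":"container","Status":"start"}`); err != nil {
		fmt.Println(err)
	}
}
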
+func newNullEventer() Eventer { return EventToNull{} } diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go b/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go index fbe03d97b30..0696920142d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go +++ b/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go @@ -29,7 +29,8 @@ func NewAPIDecoder() *schema.Decoder { } // On client: -// v := map[string][]string{ +// +// v := map[string][]string{ // "dangling": {"true"}, // } // diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go b/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go index 9eb712c304d..bb416d9f41c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go @@ -2,6 +2,7 @@ package handlers import ( "context" + "fmt" "time" "github.com/containers/common/libimage" @@ -10,7 +11,7 @@ import ( dockerContainer "github.com/docker/docker/api/types/container" dockerNetwork "github.com/docker/docker/api/types/network" "github.com/docker/go-connections/nat" - "github.com/pkg/errors" + "github.com/opencontainers/runtime-spec/specs-go" ) type AuthConfig struct { @@ -64,6 +65,12 @@ type LibpodContainersRmReport struct { RmError string `json:"Err,omitempty"` } +// UpdateEntities used to wrap the oci resource spec in a swagger model +// swagger:model +type UpdateEntities struct { + Resources *specs.LinuxResources +} + type Info struct { docker.Info BuildahVersion string @@ -127,6 +134,7 @@ type CreateContainerConfig struct { dockerContainer.Config // desired container configuration HostConfig dockerContainer.HostConfig // host dependent configuration for container NetworkingConfig dockerNetwork.NetworkingConfig // network configuration for container + EnvMerge []string // preprocess env variables from image before injecting into containers UnsetEnv []string // unset specified default environment variables UnsetEnvAll bool // unset all default environment variables } @@ -162,7 +170,7 @@ type ExecStartConfig struct { func ImageDataToImageInspect(ctx context.Context, l *libimage.Image) (*ImageInspect, error) { options := &libimage.InspectOptions{WithParent: true, WithSize: true} - info, err := l.Inspect(context.Background(), options) + info, err := l.Inspect(ctx, options) if err != nil { return nil, err } @@ -237,17 +245,17 @@ func portsToPortSet(input map[string]struct{}) (nat.PortSet, error) { case "tcp", "": p, err := nat.NewPort("tcp", port) if err != nil { - return nil, errors.Wrapf(err, "unable to create tcp port from %s", k) + return nil, fmt.Errorf("unable to create tcp port from %s: %w", k, err) } ports[p] = struct{}{} case "udp": p, err := nat.NewPort("udp", port) if err != nil { - return nil, errors.Wrapf(err, "unable to create tcp port from %s", k) + return nil, fmt.Errorf("unable to create tcp port from %s: %w", k, err) } ports[p] = struct{}{} default: - return nil, errors.Errorf("invalid port proto %q in %q", proto, k) + return nil, fmt.Errorf("invalid port proto %q in %q", proto, k) } } return ports, nil diff --git a/vendor/github.com/containers/podman/v4/pkg/auth/auth.go b/vendor/github.com/containers/podman/v4/pkg/auth/auth.go index 4192250079f..2a675e2abcb 100644 --- a/vendor/github.com/containers/podman/v4/pkg/auth/auth.go +++ b/vendor/github.com/containers/podman/v4/pkg/auth/auth.go @@ -3,7 +3,7 @@ package auth import ( "encoding/base64" "encoding/json" - "io/ioutil" + "fmt" 
"net/http" "os" "strings" @@ -11,7 +11,6 @@ import ( imageAuth "github.com/containers/image/v5/pkg/docker/config" "github.com/containers/image/v5/types" dockerAPITypes "github.com/docker/docker/api/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -47,7 +46,7 @@ func GetCredentials(r *http.Request) (*types.DockerAuthConfig, string, error) { return nil, "", nil } if err != nil { - return nil, "", errors.Wrapf(err, "failed to parse %q header for %s", headerName, r.URL.String()) + return nil, "", fmt.Errorf("failed to parse %q header for %s: %w", headerName, r.URL.String(), err) } var authFile string @@ -56,7 +55,7 @@ func GetCredentials(r *http.Request) (*types.DockerAuthConfig, string, error) { } else { authFile, err = authConfigsToAuthFile(fileContents) if err != nil { - return nil, "", errors.Wrapf(err, "failed to parse %q header for %s", headerName, r.URL.String()) + return nil, "", fmt.Errorf("failed to parse %q header for %s: %w", headerName, r.URL.String(), err) } } return override, authFile, nil @@ -72,13 +71,13 @@ func getConfigCredentials(r *http.Request, headers []string) (*types.DockerAuthC for _, h := range headers { param, err := base64.URLEncoding.DecodeString(h) if err != nil { - return nil, nil, errors.Wrapf(err, "failed to decode %q", xRegistryConfigHeader) + return nil, nil, fmt.Errorf("failed to decode %q: %w", xRegistryConfigHeader, err) } ac := make(map[string]dockerAPITypes.AuthConfig) err = json.Unmarshal(param, &ac) if err != nil { - return nil, nil, errors.Wrapf(err, "failed to unmarshal %q", xRegistryConfigHeader) + return nil, nil, fmt.Errorf("failed to unmarshal %q: %w", xRegistryConfigHeader, err) } for k, v := range ac { @@ -228,25 +227,23 @@ func encodeMultiAuthConfigs(authConfigs map[string]types.DockerAuthConfig) (stri } // authConfigsToAuthFile stores the specified auth configs in a temporary files -// and returns its path. The file can later be used an auth file for contacting +// and returns its path. The file can later be used as an auth file for contacting // one or more container registries. If tmpDir is empty, the system's default // TMPDIR will be used. func authConfigsToAuthFile(authConfigs map[string]types.DockerAuthConfig) (string, error) { // Initialize an empty temporary JSON file. - tmpFile, err := ioutil.TempFile("", "auth.json.") + tmpFile, err := os.CreateTemp("", "auth.json.") if err != nil { return "", err } if _, err := tmpFile.Write([]byte{'{', '}'}); err != nil { - return "", errors.Wrap(err, "error initializing temporary auth file") + return "", fmt.Errorf("initializing temporary auth file: %w", err) } if err := tmpFile.Close(); err != nil { - return "", errors.Wrap(err, "error closing temporary auth file") + return "", fmt.Errorf("closing temporary auth file: %w", err) } authFilePath := tmpFile.Name() - // TODO: It would be nice if c/image could dump the map at once. - // // Now use the c/image packages to store the credentials. It's battle // tested, and we make sure to use the same code as the image backend. sys := types.SystemContext{AuthFilePath: authFilePath} @@ -257,7 +254,7 @@ func authConfigsToAuthFile(authConfigs map[string]types.DockerAuthConfig) (strin // that all credentials are valid. They'll be used on demand // later. 
if err := imageAuth.SetAuthentication(&sys, key, config.Username, config.Password); err != nil { - return "", errors.Wrapf(err, "error storing credentials in temporary auth file (key: %q / %q, user: %q)", authFileKey, key, config.Username) + return "", fmt.Errorf("storing credentials in temporary auth file (key: %q / %q, user: %q): %w", authFileKey, key, config.Username, err) } } @@ -287,7 +284,7 @@ func normalizeAuthFileKey(authFileKey string) string { // dockerAuthToImageAuth converts a docker auth config to one we're using // internally from c/image. Note that the Docker types look slightly // different, so we need to convert to be extra sure we're not running into -// undesired side-effects when unmarhalling directly to our types. +// undesired side-effects when unmarshalling directly to our types. func dockerAuthToImageAuth(authConfig dockerAPITypes.AuthConfig) types.DockerAuthConfig { return types.DockerAuthConfig{ Username: authConfig.Username, diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/connection.go b/vendor/github.com/containers/podman/v4/pkg/bindings/connection.go index 3739ec404fd..7bd1ff48493 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/connection.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/connection.go @@ -2,6 +2,7 @@ package bindings import ( "context" + "errors" "fmt" "io" "net" @@ -12,13 +13,11 @@ import ( "strings" "time" - "github.com/blang/semver" - "github.com/containers/podman/v4/pkg/terminal" + "github.com/blang/semver/v4" + "github.com/containers/common/pkg/ssh" "github.com/containers/podman/v4/version" - "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" + "golang.org/x/net/proxy" ) type APIResponse struct { @@ -43,7 +42,7 @@ func GetClient(ctx context.Context) (*Connection, error) { if c, ok := ctx.Value(clientKey).(*Connection); ok { return c, nil } - return nil, errors.Errorf("%s not set in context", clientKey) + return nil, fmt.Errorf("%s not set in context", clientKey) } // ServiceVersion from context build by NewConnection() @@ -61,7 +60,7 @@ func JoinURL(elements ...string) string { // NewConnection creates a new service connection without an identity func NewConnection(ctx context.Context, uri string) (context.Context, error) { - return NewConnectionWithIdentity(ctx, uri, "") + return NewConnectionWithIdentity(ctx, uri, "", false) } // NewConnectionWithIdentity takes a URI as a string and returns a context with the @@ -72,10 +71,9 @@ func NewConnection(ctx context.Context, uri string) (context.Context, error) { // For example tcp://localhost: // or unix:///run/podman/podman.sock // or ssh://@[:port]/run/podman/podman.sock?secure=True -func NewConnectionWithIdentity(ctx context.Context, uri string, identity string) (context.Context, error) { +func NewConnectionWithIdentity(ctx context.Context, uri string, identity string, machine bool) (context.Context, error) { var ( - err error - secure bool + err error ) if v, found := os.LookupEnv("CONTAINER_HOST"); found && uri == "" { uri = v @@ -85,25 +83,39 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, identity string) identity = v } - passPhrase := "" - if v, found := os.LookupEnv("CONTAINER_PASSPHRASE"); found { - passPhrase = v - } - _url, err := url.Parse(uri) if err != nil { - return nil, errors.Wrapf(err, "Value of CONTAINER_HOST is not a valid url: %s", uri) + return nil, fmt.Errorf("value of CONTAINER_HOST is not a valid url: %s: %w", uri, err) } - // Now we setup the 
http Client to use the connection above + // Now we set up the http Client to use the connection above var connection Connection switch _url.Scheme { case "ssh": - secure, err = strconv.ParseBool(_url.Query().Get("secure")) + port := 22 + if _url.Port() != "" { + port, err = strconv.Atoi(_url.Port()) + if err != nil { + return nil, err + } + } + conn, err := ssh.Dial(&ssh.ConnectionDialOptions{ + Host: uri, + Identity: identity, + User: _url.User, + Port: port, + InsecureIsMachineConnection: machine, + }, "golang") if err != nil { - secure = false + return nil, err } - connection, err = sshClient(_url, secure, passPhrase, identity) + connection = Connection{URI: _url} + connection.Client = &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return ssh.DialNet(conn, "unix", _url) + }, + }} case "unix": if !strings.HasPrefix(uri, "unix:///") { // autofix unix://path_element vs unix:///path_element @@ -115,36 +127,66 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, identity string) if !strings.HasPrefix(uri, "tcp://") { return nil, errors.New("tcp URIs should begin with tcp://") } - connection = tcpClient(_url) + conn, err := tcpClient(_url) + if err != nil { + return nil, err + } + connection = conn default: - return nil, errors.Errorf("unable to create connection. %q is not a supported schema", _url.Scheme) + return nil, fmt.Errorf("unable to create connection. %q is not a supported schema", _url.Scheme) } if err != nil { - return nil, errors.Wrapf(err, "unable to connect to Podman. failed to create %sClient", _url.Scheme) + return nil, fmt.Errorf("unable to connect to Podman. failed to create %sClient: %w", _url.Scheme, err) } ctx = context.WithValue(ctx, clientKey, &connection) serviceVersion, err := pingNewConnection(ctx) if err != nil { - return nil, errors.Wrap(err, "unable to connect to Podman socket") + return nil, fmt.Errorf("unable to connect to Podman socket: %w", err) } ctx = context.WithValue(ctx, versionKey, serviceVersion) return ctx, nil } -func tcpClient(_url *url.URL) Connection { +func tcpClient(_url *url.URL) (Connection, error) { connection := Connection{ URI: _url, } + dialContext := func(ctx context.Context, _, _ string) (net.Conn, error) { + return net.Dial("tcp", _url.Host) + } + // use a proxy if the env var `CONTAINER_PROXY` is set + if proxyURI, found := os.LookupEnv("CONTAINER_PROXY"); found { + proxyURL, err := url.Parse(proxyURI) + if err != nil { + return connection, fmt.Errorf("value of CONTAINER_PROXY is not a valid url: %s: %w", proxyURI, err) + } + proxyDialer, err := proxy.FromURL(proxyURL, proxy.Direct) + if err != nil { + return connection, fmt.Errorf("unable to dial to proxy %s, %w", proxyURI, err) + } + dialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + logrus.Debugf("use proxy %s, but proxy dialer does not support dial timeout", proxyURI) + return proxyDialer.Dial("tcp", _url.Host) + } + if f, ok := proxyDialer.(proxy.ContextDialer); ok { + dialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + // the default tcp dial timeout seems to be 75s, podman-remote will retry 3 times before exiting. + // here we change proxy dial timeout to 3s + logrus.Debugf("use proxy %s with dial timeout 3s", proxyURI) + ctx, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() // It's safe to cancel, `f.DialContext` only uses ctx for returning the Conn, not for the lifetime of the Conn.
+ return f.DialContext(ctx, "tcp", _url.Host) + } + } + } connection.Client = &http.Client{ Transport: &http.Transport{ - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - return net.Dial("tcp", _url.Host) - }, + DialContext: dialContext, DisableCompression: true, }, } - return connection + return connection, nil } // pingNewConnection pings to make sure the RESTFUL service is up @@ -164,7 +206,7 @@ func pingNewConnection(ctx context.Context) (*semver.Version, error) { if response.StatusCode == http.StatusOK { versionHdr := response.Header.Get("Libpod-API-Version") if versionHdr == "" { - logrus.Info("Service did not provide Libpod-API-Version Header") + logrus.Warn("Service did not provide Libpod-API-Version Header") return new(semver.Version), nil } versionSrv, err := semver.ParseTolerant(versionHdr) @@ -177,129 +219,11 @@ func pingNewConnection(ctx context.Context) (*semver.Version, error) { // Server's job when Client version is equal or older return &versionSrv, nil case 1: - return nil, errors.Errorf("server API version is too old. Client %q server %q", + return nil, fmt.Errorf("server API version is too old. Client %q server %q", version.APIVersion[version.Libpod][version.MinimalAPI].String(), versionSrv.String()) } } - return nil, errors.Errorf("ping response was %d", response.StatusCode) -} - -func sshClient(_url *url.URL, secure bool, passPhrase string, identity string) (Connection, error) { - // if you modify the authmethods or their conditionals, you will also need to make similar - // changes in the client (currently cmd/podman/system/connection/add getUDS). - - var signers []ssh.Signer // order Signers are appended to this list determines which key is presented to server - - if len(identity) > 0 { - s, err := terminal.PublicKey(identity, []byte(passPhrase)) - if err != nil { - return Connection{}, errors.Wrapf(err, "failed to parse identity %q", identity) - } - - signers = append(signers, s) - logrus.Debugf("SSH Ident Key %q %s %s", identity, ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) - } - - if sock, found := os.LookupEnv("SSH_AUTH_SOCK"); found { - logrus.Debugf("Found SSH_AUTH_SOCK %q, ssh-agent signer(s) enabled", sock) - - c, err := net.Dial("unix", sock) - if err != nil { - return Connection{}, err - } - - agentSigners, err := agent.NewClient(c).Signers() - if err != nil { - return Connection{}, err - } - signers = append(signers, agentSigners...) 
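
The CONTAINER_PROXY support in tcpClient above leans on golang.org/x/net/proxy: the proxy URL becomes a Dialer via proxy.FromURL, and when that dialer also implements proxy.ContextDialer the handshake can be bounded by a context timeout, 3 seconds in the diff. A standalone sketch of the same upgrade pattern (the SOCKS address and target are examples):

package main

import (
	"context"
	"net"
	"net/url"
	"time"

	"golang.org/x/net/proxy"
)

// dialViaProxy connects to addr through proxyURI, preferring the
// context-aware path so connection setup can be bounded by a timeout.
func dialViaProxy(proxyURI, addr string) (net.Conn, error) {
	u, err := url.Parse(proxyURI)
	if err != nil {
		return nil, err
	}
	d, err := proxy.FromURL(u, proxy.Direct)
	if err != nil {
		return nil, err
	}
	if cd, ok := d.(proxy.ContextDialer); ok {
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel() // bounds only connection setup, not the returned Conn
		return cd.DialContext(ctx, "tcp", addr)
	}
	return d.Dial("tcp", addr) // fallback path: no dial timeout support
}

func main() {
	conn, err := dialViaProxy("socks5://127.0.0.1:1080", "example.com:80")
	if err == nil {
		conn.Close()
	}
}
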
- - if logrus.IsLevelEnabled(logrus.DebugLevel) { - for _, s := range agentSigners { - logrus.Debugf("SSH Agent Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) - } - } - } - - var authMethods []ssh.AuthMethod - if len(signers) > 0 { - var dedup = make(map[string]ssh.Signer) - // Dedup signers based on fingerprint, ssh-agent keys override CONTAINER_SSHKEY - for _, s := range signers { - fp := ssh.FingerprintSHA256(s.PublicKey()) - if _, found := dedup[fp]; found { - logrus.Debugf("Dedup SSH Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) - } - dedup[fp] = s - } - - var uniq []ssh.Signer - for _, s := range dedup { - uniq = append(uniq, s) - } - authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { - return uniq, nil - })) - } - - if pw, found := _url.User.Password(); found { - authMethods = append(authMethods, ssh.Password(pw)) - } - - if len(authMethods) == 0 { - callback := func() (string, error) { - pass, err := terminal.ReadPassword("Login password:") - return string(pass), err - } - authMethods = append(authMethods, ssh.PasswordCallback(callback)) - } - - port := _url.Port() - if port == "" { - port = "22" - } - - callback := ssh.InsecureIgnoreHostKey() - if secure { - host := _url.Hostname() - if port != "22" { - host = fmt.Sprintf("[%s]:%s", host, port) - } - key := terminal.HostKey(host) - if key != nil { - callback = ssh.FixedHostKey(key) - } - } - - bastion, err := ssh.Dial("tcp", - net.JoinHostPort(_url.Hostname(), port), - &ssh.ClientConfig{ - User: _url.User.Username(), - Auth: authMethods, - HostKeyCallback: callback, - HostKeyAlgorithms: []string{ - ssh.KeyAlgoRSA, - ssh.KeyAlgoDSA, - ssh.KeyAlgoECDSA256, - ssh.KeyAlgoECDSA384, - ssh.KeyAlgoECDSA521, - ssh.KeyAlgoED25519, - }, - Timeout: 5 * time.Second, - }, - ) - if err != nil { - return Connection{}, errors.Wrapf(err, "connection to bastion host (%s) failed", _url.String()) - } - - connection := Connection{URI: _url} - connection.Client = &http.Client{ - Transport: &http.Transport{ - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - return bastion.Dial("unix", _url.Path) - }, - }} - return connection, nil + return nil, fmt.Errorf("ping response was %d", response.StatusCode) } func unixClient(_url *url.URL) Connection { @@ -315,7 +239,8 @@ func unixClient(_url *url.URL) Connection { return connection } -// DoRequest assembles the http request and returns the response +// DoRequest assembles the http request and returns the response. +// The caller must close the response body. func (c *Connection) DoRequest(ctx context.Context, httpBody io.Reader, httpMethod, endpoint string, queryParams url.Values, headers http.Header, pathValues ...string) (*APIResponse, error) { var ( err error @@ -361,7 +286,7 @@ func (c *Connection) DoRequest(ctx context.Context, httpBody io.Reader, httpMeth // Give the Do three chances in the case of a comm/service hiccup for i := 1; i <= 3; i++ { - response, err = c.Client.Do(req) // nolint + response, err = c.Client.Do(req) //nolint:bodyclose // The caller has to close the body. 
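
Version negotiation in pingNewConnection above now goes through the vendored blang/semver/v4: the server's Libpod-API-Version header is parsed tolerantly and compared against the client's minimum. A small sketch of that comparison, with example version literals:

package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	minAPI := semver.MustParse("4.0.0")     // client's minimal supported API, example value
	srv, err := semver.ParseTolerant("4.4") // tolerant parse fills missing parts -> 4.4.0
	if err != nil {
		fmt.Println("bad version header:", err)
		return
	}
	if srv.Compare(minAPI) < 0 {
		fmt.Printf("server API version is too old: client %s, server %s\n", minAPI, srv)
		return
	}
	fmt.Println("compatible, negotiated server version:", srv)
}
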
if err == nil { break } @@ -378,7 +303,7 @@ func (c *Connection) GetDialer(ctx context.Context) (net.Conn, error) { return transport.DialContext(ctx, c.URI.Scheme, c.URI.String()) } - return nil, errors.New("Unable to get dial context") + return nil, errors.New("unable to get dial context") } // IsInformational returns true if the response code is 1xx diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/archive.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/archive.go index 4f4b5a36a62..660d9da6b33 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/archive.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/archive.go @@ -2,6 +2,7 @@ package containers import ( "context" + "errors" "io" "net/http" "net/url" @@ -9,7 +10,6 @@ import ( "github.com/containers/podman/v4/pkg/bindings" "github.com/containers/podman/v4/pkg/copy" "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/pkg/errors" ) // Stat checks if the specified path is on the container. Note that the stat @@ -55,8 +55,6 @@ func CopyFromArchive(ctx context.Context, nameOrID string, path string, reader i } // CopyFromArchiveWithOptions copy files into container -// -// FIXME: remove this function and make CopyFromArchive accept the option as the last parameter in podman 4.0 func CopyFromArchiveWithOptions(ctx context.Context, nameOrID string, path string, reader io.Reader, options *CopyOptions) (entities.ContainerCopyFunc, error) { conn, err := bindings.GetClient(ctx) if err != nil { diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go index d84b4705215..e23ee5ee940 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/binary" + "errors" "fmt" "io" "net" @@ -14,11 +15,10 @@ import ( "strconv" "time" + "github.com/containers/common/pkg/util" "github.com/containers/podman/v4/libpod/define" "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/utils" "github.com/moby/term" - "github.com/pkg/errors" "github.com/sirupsen/logrus" terminal "golang.org/x/term" ) @@ -54,8 +54,6 @@ func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Wri stderr = (io.Writer)(nil) } - logrus.Infof("Going to attach to container %q", nameOrID) - conn, err := bindings.GetClient(ctx) if err != nil { return err @@ -77,7 +75,7 @@ func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Wri detachKeysInBytes, err = term.ToBytes(options.GetDetachKeys()) if err != nil { - return errors.Wrapf(err, "invalid detach keys") + return fmt.Errorf("invalid detach keys: %w", err) } } if isSet.stdin { @@ -161,7 +159,7 @@ func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Wri go func() { logrus.Debugf("Copying STDIN to socket") - _, err := utils.CopyDetachable(socket, stdin, detachKeysInBytes) + _, err := util.CopyDetachable(socket, stdin, detachKeysInBytes) if err != nil && err != define.ErrDetach { logrus.Errorf("Failed to write input to service: %v", err) } @@ -263,7 +261,7 @@ func DemuxHeader(r io.Reader, buffer []byte) (fd, sz int, err error) { fd = int(buffer[0]) if fd < 0 || fd > 3 { - err = errors.Wrapf(ErrLostSync, fmt.Sprintf(`channel "%d" found, 0-3 supported`, fd)) + err = fmt.Errorf(`channel "%d" found, 
0-3 supported: %w`, fd, ErrLostSync) return } @@ -357,7 +355,7 @@ func attachHandleResize(ctx, winCtx context.Context, winChange chan os.Signal, i resizeErr = ResizeContainerTTY(ctx, id, new(ResizeTTYOptions).WithHeight(h).WithWidth(w)) } if resizeErr != nil { - logrus.Infof("Failed to resize TTY: %v", resizeErr) + logrus.Debugf("Failed to resize TTY: %v", resizeErr) } } @@ -499,7 +497,7 @@ func ExecStartAndAttach(ctx context.Context, sessionID string, options *ExecStar if options.GetAttachInput() { go func() { logrus.Debugf("Copying STDIN to socket") - _, err := utils.CopyDetachable(socket, options.InputStream, []byte{}) + _, err := util.CopyDetachable(socket, options.InputStream, []byte{}) if err != nil { logrus.Errorf("Failed to write input to service: %v", err) } @@ -520,7 +518,7 @@ func ExecStartAndAttach(ctx context.Context, sessionID string, options *ExecStar return fmt.Errorf("exec session %s has a terminal and must have STDOUT enabled", sessionID) } // If not multiplex'ed, read from server and write to stdout - _, err := utils.CopyDetachable(options.GetOutputStream(), socket, []byte{}) + _, err := util.CopyDetachable(options.GetOutputStream(), socket, []byte{}) if err != nil { return err } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/checkpoint.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/checkpoint.go index bcb94448860..8c072f588fb 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/checkpoint.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/checkpoint.go @@ -39,7 +39,7 @@ func Checkpoint(ctx context.Context, nameOrID string, options *CheckpointOptions } defer response.Body.Close() - if !export { + if response.StatusCode != http.StatusOK || !export { return &report, response.Process(&report) } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/containers.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/containers.go index be421cc8b95..80ec7bc6fc0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/containers.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/containers.go @@ -2,6 +2,8 @@ package containers import ( "context" + "errors" + "fmt" "io" "net/http" "net/url" @@ -12,8 +14,6 @@ import ( "github.com/containers/podman/v4/pkg/bindings" "github.com/containers/podman/v4/pkg/domain/entities" "github.com/containers/podman/v4/pkg/domain/entities/reports" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var ( @@ -25,7 +25,7 @@ var ( // the most recent number of containers. The pod and size booleans indicate that pod information and rootfs // size information should also be included. Finally, the sync bool synchronizes the OCI runtime and // container state. -func List(ctx context.Context, options *ListOptions) ([]entities.ListContainer, error) { // nolint:typecheck +func List(ctx context.Context, options *ListOptions) ([]entities.ListContainer, error) { if options == nil { options = new(ListOptions) } @@ -201,7 +201,6 @@ func Start(ctx context.Context, nameOrID string, options *StartOptions) error { if options == nil { options = new(StartOptions) } - logrus.Infof("Going to start container %q", nameOrID) conn, err := bindings.GetClient(ctx) if err != nil { return err @@ -339,7 +338,7 @@ func Unpause(ctx context.Context, nameOrID string, options *UnpauseOptions) erro // Wait blocks until the given container reaches a condition. If not provided, the condition will // default to stopped. 
If the condition is stopped, an exit code for the container will be provided. The // nameOrID can be a container name or a partial/full ID. -func Wait(ctx context.Context, nameOrID string, options *WaitOptions) (int32, error) { // nolint +func Wait(ctx context.Context, nameOrID string, options *WaitOptions) (int32, error) { if options == nil { options = new(WaitOptions) } @@ -449,7 +448,7 @@ func ContainerInit(ctx context.Context, nameOrID string, options *InitOptions) e defer response.Body.Close() if response.StatusCode == http.StatusNotModified { - return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", nameOrID) + return fmt.Errorf("container %s has already been created in runtime: %w", nameOrID, define.ErrCtrStateInvalid) } return response.Process(nil) } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/exec.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/exec.go index 3ad5d67d24d..c5c8760ed80 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/exec.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/exec.go @@ -3,6 +3,8 @@ package containers import ( "bytes" "context" + "errors" + "fmt" "net/http" "strings" @@ -11,7 +13,6 @@ import ( "github.com/containers/podman/v4/pkg/bindings" "github.com/containers/podman/v4/pkg/domain/entities" jsoniter "github.com/json-iterator/go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -27,12 +28,12 @@ func ExecCreate(ctx context.Context, nameOrID string, config *handlers.ExecCreat } if config == nil { - return "", errors.Errorf("must provide a configuration for exec session") + return "", errors.New("must provide a configuration for exec session") } requestJSON, err := json.Marshal(config) if err != nil { - return "", errors.Wrapf(err, "error marshalling exec config to JSON") + return "", fmt.Errorf("marshalling exec config to JSON: %w", err) } jsonReader := strings.NewReader(string(requestJSON)) diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/logs.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/logs.go index 8ea8ed7faea..9ebfd90dadf 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/logs.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/logs.go @@ -2,13 +2,13 @@ package containers import ( "context" + "errors" "fmt" "io" "net/http" "strconv" "github.com/containers/podman/v4/pkg/bindings" - "github.com/pkg/errors" ) // Logs obtains a container's logs given the options provided. The logs are then sent to the diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go index 81d491bb7d9..8e9f39dd5a2 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go @@ -7,9 +7,10 @@ import ( "github.com/containers/podman/v4/libpod/define" ) -//go:generate go run ../generator/generator.go LogOptions // LogOptions describe finer control of log content or // how the content is formatted. +// +//go:generate go run ../generator/generator.go LogOptions type LogOptions struct { Follow *bool Since *string @@ -20,10 +21,11 @@ type LogOptions struct { Until *string } -//go:generate go run ../generator/generator.go CommitOptions // CommitOptions describe details about the resulting committed // image as defined by repo and tag. 
None of these options // are required. +// +//go:generate go run ../generator/generator.go CommitOptions type CommitOptions struct { Author *string Changes []string @@ -35,16 +37,18 @@ type CommitOptions struct { Tag *string } -//go:generate go run ../generator/generator.go AttachOptions // AttachOptions are optional options for attaching to containers +// +//go:generate go run ../generator/generator.go AttachOptions type AttachOptions struct { DetachKeys *string // Keys to detach from running container Logs *bool // Flag to return all logs from container when true Stream *bool // Flag only return container logs when false and Logs is true } -//go:generate go run ../generator/generator.go CheckpointOptions // CheckpointOptions are optional options for checkpointing containers +// +//go:generate go run ../generator/generator.go CheckpointOptions type CheckpointOptions struct { Export *string CreateImage *string @@ -58,8 +62,9 @@ type CheckpointOptions struct { FileLocks *bool } -//go:generate go run ../generator/generator.go RestoreOptions // RestoreOptions are optional options for restoring containers +// +//go:generate go run ../generator/generator.go RestoreOptions type RestoreOptions struct { IgnoreRootfs *bool IgnoreVolumes *bool @@ -82,12 +87,14 @@ type RestoreOptions struct { FileLocks *bool } -//go:generate go run ../generator/generator.go CreateOptions // CreateOptions are optional options for creating containers +// +//go:generate go run ../generator/generator.go CreateOptions type CreateOptions struct{} -//go:generate go run ../generator/generator.go DiffOptions // DiffOptions are optional options for creating containers +// +//go:generate go run ../generator/generator.go DiffOptions type DiffOptions struct { // By the default diff will compare against the parent layer. Change the Parent if you want to compare against something else. 
Parent *string @@ -95,39 +102,46 @@ type DiffOptions struct { DiffType *string } -//go:generate go run ../generator/generator.go ExecInspectOptions // ExecInspectOptions are optional options for inspecting // exec sessions +// +//go:generate go run ../generator/generator.go ExecInspectOptions type ExecInspectOptions struct{} -//go:generate go run ../generator/generator.go ExecStartOptions // ExecStartOptions are optional options for starting // exec sessions +// +//go:generate go run ../generator/generator.go ExecStartOptions type ExecStartOptions struct { } -//go:generate go run ../generator/generator.go HealthCheckOptions // HealthCheckOptions are optional options for checking // the health of a container +// +//go:generate go run ../generator/generator.go HealthCheckOptions type HealthCheckOptions struct{} -//go:generate go run ../generator/generator.go MountOptions // MountOptions are optional options for mounting // containers +// +//go:generate go run ../generator/generator.go MountOptions type MountOptions struct{} -//go:generate go run ../generator/generator.go UnmountOptions // UnmountOptions are optional options for unmounting // containers +// +//go:generate go run ../generator/generator.go UnmountOptions type UnmountOptions struct{} -//go:generate go run ../generator/generator.go MountedContainerPathsOptions // MountedContainerPathsOptions are optional options for getting // container mount paths +// +//go:generate go run ../generator/generator.go MountedContainerPathsOptions type MountedContainerPathsOptions struct{} -//go:generate go run ../generator/generator.go ListOptions // ListOptions are optional options for listing containers +// +//go:generate go run ../generator/generator.go ListOptions type ListOptions struct { All *bool External *bool @@ -138,14 +152,16 @@ type ListOptions struct { Sync *bool } -//go:generate go run ../generator/generator.go PruneOptions // PruneOptions are optional options for pruning containers +// +//go:generate go run ../generator/generator.go PruneOptions type PruneOptions struct { Filters map[string][]string } -//go:generate go run ../generator/generator.go RemoveOptions // RemoveOptions are optional options for removing containers +// +//go:generate go run ../generator/generator.go RemoveOptions type RemoveOptions struct { Depend *bool Ignore *bool @@ -154,106 +170,123 @@ type RemoveOptions struct { Timeout *uint } -//go:generate go run ../generator/generator.go InspectOptions // InspectOptions are optional options for inspecting containers +// +//go:generate go run ../generator/generator.go InspectOptions type InspectOptions struct { Size *bool } -//go:generate go run ../generator/generator.go KillOptions // KillOptions are optional options for killing containers +// +//go:generate go run ../generator/generator.go KillOptions type KillOptions struct { Signal *string } -//go:generate go run ../generator/generator.go PauseOptions // PauseOptions are optional options for pausing containers +// +//go:generate go run ../generator/generator.go PauseOptions type PauseOptions struct{} -//go:generate go run ../generator/generator.go RestartOptions // RestartOptions are optional options for restarting containers +// +//go:generate go run ../generator/generator.go RestartOptions type RestartOptions struct { Timeout *int } -//go:generate go run ../generator/generator.go StartOptions // StartOptions are optional options for starting containers +// +//go:generate go run ../generator/generator.go StartOptions type StartOptions struct { DetachKeys *string 
Recursive *bool } -//go:generate go run ../generator/generator.go StatsOptions // StatsOptions are optional options for getting stats on containers +// +//go:generate go run ../generator/generator.go StatsOptions type StatsOptions struct { Stream *bool Interval *int } -//go:generate go run ../generator/generator.go TopOptions // TopOptions are optional options for getting running // processes in containers +// +//go:generate go run ../generator/generator.go TopOptions type TopOptions struct { Descriptors *[]string } -//go:generate go run ../generator/generator.go UnpauseOptions // UnpauseOptions are optional options for unpausing containers +// +//go:generate go run ../generator/generator.go UnpauseOptions type UnpauseOptions struct{} -//go:generate go run ../generator/generator.go WaitOptions // WaitOptions are optional options for waiting on containers +// +//go:generate go run ../generator/generator.go WaitOptions type WaitOptions struct { Condition []define.ContainerStatus Interval *string } -//go:generate go run ../generator/generator.go StopOptions // StopOptions are optional options for stopping containers +// +//go:generate go run ../generator/generator.go StopOptions type StopOptions struct { Ignore *bool Timeout *uint } -//go:generate go run ../generator/generator.go ExportOptions // ExportOptions are optional options for exporting containers +// +//go:generate go run ../generator/generator.go ExportOptions type ExportOptions struct{} -//go:generate go run ../generator/generator.go InitOptions // InitOptions are optional options for initing containers +// +//go:generate go run ../generator/generator.go InitOptions type InitOptions struct{} -//go:generate go run ../generator/generator.go ShouldRestartOptions // ShouldRestartOptions +// +//go:generate go run ../generator/generator.go ShouldRestartOptions type ShouldRestartOptions struct{} -//go:generate go run ../generator/generator.go RenameOptions // RenameOptions are options for renaming containers. // The Name field is required. 
+// +//go:generate go run ../generator/generator.go RenameOptions type RenameOptions struct { Name *string } -//go:generate go run ../generator/generator.go ResizeTTYOptions // ResizeTTYOptions are optional options for resizing // container TTYs +// +//go:generate go run ../generator/generator.go ResizeTTYOptions type ResizeTTYOptions struct { Height *int Width *int Running *bool } -//go:generate go run ../generator/generator.go ResizeExecTTYOptions // ResizeExecTTYOptions are optional options for resizing // container ExecTTYs +// +//go:generate go run ../generator/generator.go ResizeExecTTYOptions type ResizeExecTTYOptions struct { Height *int Width *int } -//go:generate go run ../generator/generator.go ExecStartAndAttachOptions // ExecStartAndAttachOptions are optional options for resizing // container ExecTTYs +// +//go:generate go run ../generator/generator.go ExecStartAndAttachOptions type ExecStartAndAttachOptions struct { // OutputStream will be attached to container's STDOUT OutputStream *io.WriteCloser @@ -272,19 +305,24 @@ type ExecStartAndAttachOptions struct { AttachInput *bool } -//go:generate go run ../generator/generator.go ExistsOptions // ExistsOptions are optional options for checking if a container exists +// +//go:generate go run ../generator/generator.go ExistsOptions type ExistsOptions struct { // External checks for containers created outside of Podman External *bool } -//go:generate go run ../generator/generator.go CopyOptions // CopyOptions are options for copying to containers. +// +//go:generate go run ../generator/generator.go CopyOptions type CopyOptions struct { // If used with CopyFromArchive and set to true it will change ownership of files from the source tar archive // to the primary uid/gid of the target container. Chown *bool `schema:"copyUIDGID"` // Map to translate path names. Rename map[string]string + // NoOverwriteDirNonDir when true prevents an existing directory or file from being overwritten + // by the other type. 
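
The //go:generate shuffling above is purely cosmetic (newer gofmt wants directives separated from doc comments), but the structs it touches all follow one convention: the generator emits chainable WithX setters and nil-safe GetX getters, like the WithNoOverwriteDirNonDir/GetNoOverwriteDirNonDir pair that follows. A hedged usage sketch against the containers bindings (socket path and container name are illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/containers"
)

func main() {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	// Setters return the receiver, so they chain; fields left nil are
	// simply omitted from the request.
	opts := new(containers.StopOptions).WithTimeout(10).WithIgnore(true)
	if err := containers.Stop(ctx, "my-container", opts); err != nil {
		fmt.Println("stop failed:", err)
	}
}
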
+ NoOverwriteDirNonDir *bool } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_copy_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_copy_options.go index 8fcfe71a60c..e43d7975250 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_copy_options.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_copy_options.go @@ -46,3 +46,18 @@ func (o *CopyOptions) GetRename() map[string]string { } return o.Rename } + +// WithNoOverwriteDirNonDir set field NoOverwriteDirNonDir to given value +func (o *CopyOptions) WithNoOverwriteDirNonDir(value bool) *CopyOptions { + o.NoOverwriteDirNonDir = &value + return o +} + +// GetNoOverwriteDirNonDir returns value of field NoOverwriteDirNonDir +func (o *CopyOptions) GetNoOverwriteDirNonDir() bool { + if o.NoOverwriteDirNonDir == nil { + var z bool + return z + } + return *o.NoOverwriteDirNonDir +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/update.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/update.go new file mode 100644 index 00000000000..7cda7c3064a --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/update.go @@ -0,0 +1,31 @@ +package containers + +import ( + "context" + "net/http" + "strings" + + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" + jsoniter "github.com/json-iterator/go" +) + +func Update(ctx context.Context, options *entities.ContainerUpdateOptions) (string, error) { + conn, err := bindings.GetClient(ctx) + if err != nil { + return "", err + } + + resources, err := jsoniter.MarshalToString(options.Specgen.ResourceLimits) + if err != nil { + return "", err + } + stringReader := strings.NewReader(resources) + response, err := conn.DoRequest(ctx, stringReader, http.MethodPost, "/containers/%s/update", nil, nil, options.NameOrID) + if err != nil { + return "", err + } + defer response.Body.Close() + + return options.NameOrID, response.Process(nil) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/errors.go b/vendor/github.com/containers/podman/v4/pkg/bindings/errors.go index eb95764ba38..d9dfa95a69c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/errors.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/errors.go @@ -2,10 +2,11 @@ package bindings import ( "encoding/json" - "io/ioutil" + "errors" + "fmt" + "io" "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/pkg/errors" ) var ( @@ -28,9 +29,9 @@ func (h APIResponse) Process(unmarshalInto interface{}) error { // ProcessWithError drains the response body, and processes the HTTP status code // Note: Closing the response.Body is left to the caller func (h APIResponse) ProcessWithError(unmarshalInto interface{}, unmarshalErrorInto interface{}) error { - data, err := ioutil.ReadAll(h.Response.Body) + data, err := io.ReadAll(h.Response.Body) if err != nil { - return errors.Wrap(err, "unable to process API response") + return fmt.Errorf("unable to process API response: %w", err) } if h.IsSuccess() || h.IsRedirection() { if unmarshalInto != nil { diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go index 51dcd2aa5ee..b58153a5a82 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go @@ -5,15 
+5,14 @@ import ( "compress/gzip" "context" "encoding/json" + "errors" "fmt" "io" "io/fs" - "io/ioutil" "net/http" "net/url" "os" "path/filepath" - "regexp" "runtime" "strconv" "strings" @@ -25,10 +24,10 @@ import ( "github.com/containers/podman/v4/pkg/domain/entities" "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/regexp" "github.com/docker/go-units" "github.com/hashicorp/go-multierror" jsoniter "github.com/json-iterator/go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -37,9 +36,7 @@ type devino struct { Ino uint64 } -var ( - iidRegex = regexp.MustCompile(`^[0-9a-f]{12}`) -) +var iidRegex = regexp.Delayed(`^[0-9a-f]{12}`) // Build creates an image using a containerfile reference func Build(ctx context.Context, containerFiles []string, options entities.BuildOptions) (*entities.BuildReport, error) { @@ -65,6 +62,14 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO params.Set("annotations", l) } + if cppflags := options.CPPFlags; len(cppflags) > 0 { + l, err := jsoniter.MarshalToString(cppflags) + if err != nil { + return nil, err + } + params.Set("cppflags", l) + } + if options.AllPlatforms { params.Add("allplatforms", "1") } @@ -73,6 +78,20 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO for _, tag := range options.AdditionalTags { params.Add("t", tag) } + if additionalBuildContexts := options.AdditionalBuildContexts; len(additionalBuildContexts) > 0 { + additionalBuildContextMap, err := jsoniter.Marshal(additionalBuildContexts) + if err != nil { + return nil, err + } + params.Set("additionalbuildcontexts", string(additionalBuildContextMap)) + } + if options.IDMappingOptions != nil { + idmappingsOptions, err := jsoniter.Marshal(options.IDMappingOptions) + if err != nil { + return nil, err + } + params.Set("idmappingoptions", string(idmappingsOptions)) + } if buildArgs := options.Args; len(buildArgs) > 0 { bArgs, err := jsoniter.MarshalToString(buildArgs) if err != nil { @@ -155,6 +174,11 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO } else { params.Set("rm", "0") } + if options.CommonBuildOpts.OmitHistory { + params.Set("omithistory", "1") + } else { + params.Set("omithistory", "0") + } if len(options.From) > 0 { params.Set("from", options.From) } @@ -204,6 +228,39 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO if len(options.Manifest) > 0 { params.Set("manifest", options.Manifest) } + if options.CacheFrom != nil { + cacheFrom := []string{} + for _, cacheSrc := range options.CacheFrom { + cacheFrom = append(cacheFrom, cacheSrc.String()) + } + cacheFromJSON, err := jsoniter.MarshalToString(cacheFrom) + if err != nil { + return nil, err + } + params.Set("cachefrom", cacheFromJSON) + } + + switch options.SkipUnusedStages { + case types.OptionalBoolTrue: + params.Set("skipunusedstages", "1") + case types.OptionalBoolFalse: + params.Set("skipunusedstages", "0") + } + + if options.CacheTo != nil { + cacheTo := []string{} + for _, cacheSrc := range options.CacheTo { + cacheTo = append(cacheTo, cacheSrc.String()) + } + cacheToJSON, err := jsoniter.MarshalToString(cacheTo) + if err != nil { + return nil, err + } + params.Set("cacheto", cacheToJSON) + } + if int64(options.CacheTTL) != 0 { + params.Set("cachettl", options.CacheTTL.String()) + } if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 { params.Set("memswap", strconv.Itoa(int(memSwap))) } @@ 
-247,6 +304,15 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO params.Add("platform", platform) } } + + for _, volume := range options.CommonBuildOpts.Volumes { + params.Add("volume", volume) + } + + for _, group := range options.GroupAdd { + params.Add("groupadd", group) + } + var err error var contextDir string if contextDir, err = filepath.EvalSymlinks(options.ContextDirectory); err == nil { @@ -359,11 +425,11 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO dontexcludes := []string{"!Dockerfile", "!Containerfile", "!.dockerignore", "!.containerignore"} for _, c := range containerFiles { if c == "/dev/stdin" { - content, err := ioutil.ReadAll(os.Stdin) + content, err := io.ReadAll(os.Stdin) if err != nil { return nil, err } - tmpFile, err := ioutil.TempFile("", "build") + tmpFile, err := os.CreateTemp("", "build") if err != nil { return nil, err } @@ -429,7 +495,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO if arr[0] == "src" { // read specified secret into a tmp file // move tmp file to tar and change secret source to relative tmp file - tmpSecretFile, err := ioutil.TempFile(options.ContextDirectory, "podman-build-secret") + tmpSecretFile, err := os.CreateTemp(options.ContextDirectory, "podman-build-secret") if err != nil { return nil, err } @@ -495,7 +561,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO if logrus.IsLevelEnabled(logrus.DebugLevel) { if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found { if keep, _ := strconv.ParseBool(v); keep { - t, _ := ioutil.TempFile("", "build_*_client") + t, _ := os.CreateTemp("", "build_*_client") defer t.Close() body = io.TeeReader(response.Body, t) } @@ -523,14 +589,14 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO if err := dec.Decode(&s); err != nil { if errors.Is(err, io.ErrUnexpectedEOF) { - return nil, errors.Wrap(err, "server probably quit") + return nil, fmt.Errorf("server probably quit: %w", err) } // EOF means the stream is over in which case we need // to have read the id. if errors.Is(err, io.EOF) && id != "" { break } - return &entities.BuildReport{ID: id}, errors.Wrap(err, "decoding stream") + return &entities.BuildReport{ID: id}, fmt.Errorf("decoding stream: %w", err) } switch { @@ -554,11 +620,11 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO func nTar(excludes []string, sources ...string) (io.ReadCloser, error) { pm, err := fileutils.NewPatternMatcher(excludes) if err != nil { - return nil, errors.Wrapf(err, "error processing excludes list %v", excludes) + return nil, fmt.Errorf("processing excludes list %v: %w", excludes, err) } if len(sources) == 0 { - return nil, errors.New("No source(s) provided for build") + return nil, errors.New("no source(s) provided for build") } pr, pw := io.Pipe() @@ -583,27 +649,31 @@ func nTar(excludes []string, sources ...string) (io.ReadCloser, error) { return err } + separator := string(filepath.Separator) // check if what we are given is an empty dir, if so then continue w/ it. Else return. // if we are given a file or a symlink, we do not want to exclude it. 
- if d.IsDir() && s == path { - var p *os.File - p, err = os.Open(path) - if err != nil { - return err - } - defer p.Close() - _, err = p.Readdir(1) - if err != io.EOF { - return nil // non empty root dir, need to return - } else if err != nil { - logrus.Errorf("While reading directory %v: %v", path, err) + if s == path { + separator = "" + if d.IsDir() { + var p *os.File + p, err = os.Open(path) + if err != nil { + return err + } + defer p.Close() + _, err = p.Readdir(1) + if err != io.EOF { + return nil // non empty root dir, need to return + } else if err != nil { + logrus.Errorf("While reading directory %v: %v", path, err) + } } } - name := filepath.ToSlash(strings.TrimPrefix(path, s+string(filepath.Separator))) + name := filepath.ToSlash(strings.TrimPrefix(path, s+separator)) - excluded, err := pm.Matches(name) // nolint:staticcheck + excluded, err := pm.Matches(name) //nolint:staticcheck if err != nil { - return errors.Wrapf(err, "error checking if %q is excluded", name) + return fmt.Errorf("checking if %q is excluded: %w", name, err) } if excluded { // Note: filepath.SkipDir is not possible to use given .dockerignore semantics. @@ -701,12 +771,12 @@ func nTar(excludes []string, sources ...string) (io.ReadCloser, error) { } func parseDockerignore(root string) ([]string, error) { - ignore, err := ioutil.ReadFile(filepath.Join(root, ".containerignore")) + ignore, err := os.ReadFile(filepath.Join(root, ".containerignore")) if err != nil { var dockerIgnoreErr error - ignore, dockerIgnoreErr = ioutil.ReadFile(filepath.Join(root, ".dockerignore")) + ignore, dockerIgnoreErr = os.ReadFile(filepath.Join(root, ".dockerignore")) if dockerIgnoreErr != nil && !os.IsNotExist(dockerIgnoreErr) { - return nil, errors.Wrapf(err, "error reading .containerignore: '%s'", root) + return nil, err } } rawexcludes := strings.Split(string(ignore), "\n") diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_unix.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_unix.go index 32e2ba9af9d..07bb8cbcdcc 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_unix.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_unix.go @@ -11,7 +11,7 @@ import ( func checkHardLink(fi os.FileInfo) (devino, bool) { st := fi.Sys().(*syscall.Stat_t) return devino{ - Dev: uint64(st.Dev), // nolint: unconvert + Dev: uint64(st.Dev), //nolint: unconvert Ino: st.Ino, }, st.Nlink > 1 } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/images.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/images.go index 8e3b079298f..ea7d445dbaa 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/images.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/images.go @@ -2,6 +2,7 @@ package images import ( "context" + "errors" "fmt" "io" "net/http" @@ -14,7 +15,6 @@ import ( "github.com/containers/podman/v4/pkg/bindings" "github.com/containers/podman/v4/pkg/domain/entities" "github.com/containers/podman/v4/pkg/domain/entities/reports" - "github.com/pkg/errors" ) // Exists a lightweight way to determine if an image exists in local storage. It returns a @@ -267,47 +267,6 @@ func Import(ctx context.Context, r io.Reader, options *ImportOptions) (*entities return &report, response.Process(&report) } -// Push is the binding for libpod's v2 endpoints for push images. Note that -// `source` must be a referring to an image in the remote's container storage. 
-// The destination must be a reference to a registry (i.e., of docker transport -// or be normalized to one). Other transports are rejected as they do not make -// sense in a remote context. -func Push(ctx context.Context, source string, destination string, options *PushOptions) error { - if options == nil { - options = new(PushOptions) - } - conn, err := bindings.GetClient(ctx) - if err != nil { - return err - } - // TODO: have a global system context we can pass around (1st argument) - header, err := auth.MakeXRegistryAuthHeader(&imageTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword()) - if err != nil { - return err - } - - params, err := options.ToParams() - if err != nil { - return err - } - // SkipTLSVerify is special. We need to delete the param added by - // toparams and change the key and flip the bool - if options.SkipTLSVerify != nil { - params.Del("SkipTLSVerify") - params.Set("tlsVerify", strconv.FormatBool(!options.GetSkipTLSVerify())) - } - params.Set("destination", destination) - - path := fmt.Sprintf("/images/%s/push", source) - response, err := conn.DoRequest(ctx, nil, http.MethodPost, path, params, header) - if err != nil { - return err - } - defer response.Body.Close() - - return response.Process(err) -} - // Search is the binding for libpod's v2 endpoints for Search images. func Search(ctx context.Context, term string, options *SearchOptions) ([]entities.ImageSearchReport, error) { if options == nil { @@ -323,13 +282,12 @@ func Search(ctx context.Context, term string, options *SearchOptions) ([]entitie } params.Set("term", term) - // Note: we have to verify if skipped is false. + // SkipTLSVerify is special. It's not being serialized by ToParams() + // because we need to flip the boolean. if options.SkipTLSVerify != nil { - params.Del("SkipTLSVerify") params.Set("tlsVerify", strconv.FormatBool(!options.GetSkipTLSVerify())) } - // TODO: have a global system context we can pass around (1st argument) header, err := auth.MakeXRegistryAuthHeader(&imageTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, "", "") if err != nil { return nil, err @@ -348,3 +306,23 @@ func Search(ctx context.Context, term string, options *SearchOptions) ([]entitie return results, nil } + +func Scp(ctx context.Context, source, destination *string, options ScpOptions) (reports.ScpReport, error) { + rep := reports.ScpReport{} + + conn, err := bindings.GetClient(ctx) + if err != nil { + return rep, err + } + params, err := options.ToParams() + if err != nil { + return rep, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, fmt.Sprintf("/images/scp/%s", *source), params, nil) + if err != nil { + return rep, err + } + defer response.Body.Close() + + return rep, response.Process(&rep) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/pull.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/pull.go index 20e47179ce6..43592f6e216 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/pull.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/pull.go @@ -3,9 +3,9 @@ package images import ( "context" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "net/http" "os" "strconv" @@ -15,7 +15,6 @@ import ( "github.com/containers/podman/v4/pkg/bindings" "github.com/containers/podman/v4/pkg/domain/entities" "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/pkg/errors" ) // Pull is the binding for libpod's v2 endpoints for pulling images. 
Note that @@ -36,13 +35,12 @@ func Pull(ctx context.Context, rawImage string, options *PullOptions) ([]string, } params.Set("reference", rawImage) + // SkipTLSVerify is special. It's not being serialized by ToParams() + // because we need to flip the boolean. if options.SkipTLSVerify != nil { - params.Del("SkipTLSVerify") - // Note: we have to verify if skipped is false. params.Set("tlsVerify", strconv.FormatBool(!options.GetSkipTLSVerify())) } - // TODO: have a global system context we can pass around (1st argument) header, err := auth.MakeXRegistryAuthHeader(&types.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword()) if err != nil { return nil, err @@ -58,15 +56,20 @@ func Pull(ctx context.Context, rawImage string, options *PullOptions) ([]string, return nil, response.Process(err) } - // Historically pull writes status to stderr - stderr := io.Writer(os.Stderr) + var writer io.Writer if options.GetQuiet() { - stderr = ioutil.Discard + writer = io.Discard + } else if progressWriter := options.GetProgressWriter(); progressWriter != nil { + writer = progressWriter + } else { + // Historically pull writes status to stderr + writer = os.Stderr } dec := json.NewDecoder(response.Body) var images []string var pullErrors []error +LOOP: for { var report entities.ImagePullReport if err := dec.Decode(&report); err != nil { @@ -78,21 +81,21 @@ func Pull(ctx context.Context, rawImage string, options *PullOptions) ([]string, select { case <-response.Request.Context().Done(): - break + break LOOP default: // non-blocking select } switch { case report.Stream != "": - fmt.Fprint(stderr, report.Stream) + fmt.Fprint(writer, report.Stream) case report.Error != "": pullErrors = append(pullErrors, errors.New(report.Error)) case len(report.Images) > 0: images = report.Images case report.ID != "": default: - return images, errors.Errorf("failed to parse pull results stream, unexpected input: %v", report) + return images, fmt.Errorf("failed to parse pull results stream, unexpected input: %v", report) } } return images, errorhandling.JoinErrors(pullErrors) diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/push.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/push.go new file mode 100644 index 00000000000..c04ffbf9047 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/push.go @@ -0,0 +1,99 @@ +package images + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "strconv" + + imageTypes "github.com/containers/image/v5/types" + "github.com/containers/podman/v4/pkg/auth" + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" +) + +// Push is the binding for libpod's endpoints for pushing images. Note that +// `source` must refer to an image in the remote's container storage. +// The destination must be a reference to a registry (i.e., of docker transport +// or be normalized to one). Other transports are rejected as they do not make +// sense in a remote context. 
+func Push(ctx context.Context, source string, destination string, options *PushOptions) error { + if options == nil { + options = new(PushOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + header, err := auth.MakeXRegistryAuthHeader(&imageTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword()) + if err != nil { + return err + } + + params, err := options.ToParams() + if err != nil { + return err + } + // SkipTLSVerify is special. It's not being serialized by ToParams() + // because we need to flip the boolean. + if options.SkipTLSVerify != nil { + params.Set("tlsVerify", strconv.FormatBool(!options.GetSkipTLSVerify())) + } + params.Set("destination", destination) + + path := fmt.Sprintf("/images/%s/push", source) + response, err := conn.DoRequest(ctx, nil, http.MethodPost, path, params, header) + if err != nil { + return err + } + defer response.Body.Close() + + if !response.IsSuccess() { + return response.Process(err) + } + + var writer io.Writer + if options.GetQuiet() { + writer = io.Discard + } else if progressWriter := options.GetProgressWriter(); progressWriter != nil { + writer = progressWriter + } else { + // Historically push writes status to stderr + writer = os.Stderr + } + + dec := json.NewDecoder(response.Body) +LOOP: + for { + var report entities.ImagePushReport + if err := dec.Decode(&report); err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + select { + case <-response.Request.Context().Done(): + break LOOP + default: + // non-blocking select + } + + switch { + case report.Stream != "": + fmt.Fprint(writer, report.Stream) + case report.Error != "": + // There can only be one error. + return errors.New(report.Error) + default: + return fmt.Errorf("failed to parse push results stream, unexpected input: %v", report) + } + } + + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/rm.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/rm.go index b80bacf4539..eb3eef10c47 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/rm.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/rm.go @@ -16,9 +16,6 @@ func Remove(ctx context.Context, images []string, options *RemoveOptions) (*enti if options == nil { options = new(RemoveOptions) } - // FIXME - bindings tests are missing for this endpoint. Once the CI is - // re-enabled for bindings, we need to add them. At the time of writing, - // the tests don't compile. var report types.LibpodImagesRemoveReport conn, err := bindings.GetClient(ctx) if err != nil { diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go index 8e5e7ee929e..3f9e503ca90 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go @@ -1,11 +1,14 @@ package images import ( + "io" + buildahDefine "github.com/containers/buildah/define" ) -//go:generate go run ../generator/generator.go RemoveOptions // RemoveOptions are optional options for image removal +// +//go:generate go run ../generator/generator.go RemoveOptions type RemoveOptions struct { // All removes all images All *bool @@ -13,10 +16,15 @@ type RemoveOptions struct { Force *bool // Ignore if a specified image does not exist and do not throw an error. 
Ignore *bool + // Confirms if given name is a manifest list and removes it, otherwise returns error. + LookupManifest *bool + // Does not remove dangling parent images + NoPrune *bool } -//go:generate go run ../generator/generator.go DiffOptions // DiffOptions are optional options image diffs +// +//go:generate go run ../generator/generator.go DiffOptions type DiffOptions struct { // By the default diff will compare against the parent layer. Change the Parent if you want to compare against something else. Parent *string @@ -24,8 +32,9 @@ type DiffOptions struct { DiffType *string } -//go:generate go run ../generator/generator.go ListOptions // ListOptions are optional options for listing images +// +//go:generate go run ../generator/generator.go ListOptions type ListOptions struct { // All lists all image in the image store including dangling images All *bool @@ -33,35 +42,40 @@ type ListOptions struct { Filters map[string][]string } -//go:generate go run ../generator/generator.go GetOptions // GetOptions are optional options for inspecting an image +// +//go:generate go run ../generator/generator.go GetOptions type GetOptions struct { // Size computes the amount of storage the image consumes Size *bool } -//go:generate go run ../generator/generator.go TreeOptions // TreeOptions are optional options for a tree-based representation // of the image +// +//go:generate go run ../generator/generator.go TreeOptions type TreeOptions struct { // WhatRequires ... WhatRequires *bool } -//go:generate go run ../generator/generator.go HistoryOptions // HistoryOptions are optional options image history +// +//go:generate go run ../generator/generator.go HistoryOptions type HistoryOptions struct { } -//go:generate go run ../generator/generator.go LoadOptions // LoadOptions are optional options for loading an image +// +//go:generate go run ../generator/generator.go LoadOptions type LoadOptions struct { // Reference is the name of the loaded image Reference *string } -//go:generate go run ../generator/generator.go ExportOptions // ExportOptions are optional options for exporting images +// +//go:generate go run ../generator/generator.go ExportOptions type ExportOptions struct { // Compress the image Compress *bool @@ -71,8 +85,9 @@ type ExportOptions struct { OciAcceptUncompressedLayers *bool } -//go:generate go run ../generator/generator.go PruneOptions // PruneOptions are optional options for pruning images +// +//go:generate go run ../generator/generator.go PruneOptions type PruneOptions struct { // Prune all images All *bool @@ -82,18 +97,21 @@ type PruneOptions struct { Filters map[string][]string } -//go:generate go run ../generator/generator.go TagOptions // TagOptions are optional options for tagging images +// +//go:generate go run ../generator/generator.go TagOptions type TagOptions struct { } -//go:generate go run ../generator/generator.go UntagOptions // UntagOptions are optional options for untagging images +// +//go:generate go run ../generator/generator.go UntagOptions type UntagOptions struct { } -//go:generate go run ../generator/generator.go ImportOptions // ImportOptions are optional options for importing images +// +//go:generate go run ../generator/generator.go ImportOptions type ImportOptions struct { // Changes to be applied to the image Changes *[]string @@ -111,8 +129,9 @@ type ImportOptions struct { Variant *string } -//go:generate go run ../generator/generator.go PushOptions // PushOptions are optional options for importing images +// +//go:generate go run 
../generator/generator.go PushOptions type PushOptions struct { // All indicates whether to push all images related to the image list All *bool @@ -121,18 +140,29 @@ type PushOptions struct { // Authfile is the path to the authentication file. Ignored for remote calls. Authfile *string // Compress tarball image layers when pushing to a directory using the 'dir' transport. Compress *bool + // CompressionFormat is the format to use for the compression of the blobs + CompressionFormat *string // Manifest type of the pushed image Format *string // Password for authenticating against the registry. Password *string + // ProgressWriter is a writer where push progress is sent. + // Since the API handler for image push is quiet by default, WithQuiet(false) is necessary for + // the writer to receive progress messages. + ProgressWriter *io.Writer `schema:"-"` // SkipTLSVerify to skip HTTPS and certificate verification. - SkipTLSVerify *bool + SkipTLSVerify *bool `schema:"-"` + // RemoveSignatures discards any pre-existing signatures in the image. + RemoveSignatures *bool // Username for authenticating against the registry. Username *string + // Quiet can be specified to suppress progress when pushing. + Quiet *bool } -//go:generate go run ../generator/generator.go SearchOptions // SearchOptions are optional options for searching images on registries +// +//go:generate go run ../generator/generator.go SearchOptions type SearchOptions struct { // Authfile is the path to the authentication file. Ignored for remote // calls. @@ -142,13 +172,14 @@ type SearchOptions struct { // Limit the number of results. Limit *int // SkipTLSVerify to skip HTTPS and certificate verification. - SkipTLSVerify *bool + SkipTLSVerify *bool `schema:"-"` // ListTags search the available tags of the repository ListTags *bool } -//go:generate go run ../generator/generator.go PullOptions // PullOptions are optional options for pulling images +// +//go:generate go run ../generator/generator.go PullOptions type PullOptions struct { // AllTags can be specified to pull all tags of an image. Note // that this only works if the image does not include a tag. @@ -166,11 +197,13 @@ type PullOptions struct { Policy *string // Password for authenticating against the registry. Password *string + // ProgressWriter is a writer where pull progress is sent. + ProgressWriter *io.Writer `schema:"-"` // Quiet can be specified to suppress pull progress when pulling. Ignored // for remote calls. Quiet *bool // SkipTLSVerify to skip HTTPS and certificate verification. - SkipTLSVerify *bool + SkipTLSVerify *bool `schema:"-"` // Username for authenticating against the registry. Username *string // Variant will overwrite the local variant for image pulls. 
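For reviewers: the `ProgressWriter` field added to `PullOptions` above is what lets a binding client stream pull status somewhere other than stderr. A minimal sketch of the client side, not taken from this diff — the socket URI and image name are placeholders, and it assumes the long-standing `bindings.NewConnection` entry point:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/images"
)

func main() {
	// Placeholder socket URI; any reachable Podman API service works.
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}

	opts := new(images.PullOptions).
		WithQuiet(false).              // progress is only streamed when not quiet
		WithProgressWriter(os.Stderr). // route status text to any io.Writer
		WithSkipTLSVerify(false)       // serialized as tlsVerify=true; the binding flips it

	ids, err := images.Pull(ctx, "quay.io/podman/hello:latest", opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("pulled:", ids)
}
```

Note the `schema:"-"` tags above pair with the `ToParams` change earlier in this diff: tagged fields are skipped during serialization and handled by hand in the binding.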
@@ -182,7 +215,13 @@ type BuildOptions struct { buildahDefine.BuildOptions } -//go:generate go run ../generator/generator.go ExistsOptions // ExistsOptions are optional options for checking if an image exists +// +//go:generate go run ../generator/generator.go ExistsOptions type ExistsOptions struct { } + +type ScpOptions struct { + Quiet *bool + Destination *string +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_pull_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_pull_options.go index 4cd52518584..c1a88fd9ed5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_pull_options.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_pull_options.go @@ -2,6 +2,7 @@ package images import ( + "io" "net/url" "github.com/containers/podman/v4/pkg/bindings/internal/util" @@ -107,6 +108,21 @@ func (o *PullOptions) GetPassword() string { return *o.Password } +// WithProgressWriter set field ProgressWriter to given value +func (o *PullOptions) WithProgressWriter(value io.Writer) *PullOptions { + o.ProgressWriter = &value + return o +} + +// GetProgressWriter returns value of field ProgressWriter +func (o *PullOptions) GetProgressWriter() io.Writer { + if o.ProgressWriter == nil { + var z io.Writer + return z + } + return *o.ProgressWriter +} + // WithQuiet set field Quiet to given value func (o *PullOptions) WithQuiet(value bool) *PullOptions { o.Quiet = &value diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go index 4985c945123..817d873f8c5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go @@ -2,6 +2,7 @@ package images import ( + "io" "net/url" "github.com/containers/podman/v4/pkg/bindings/internal/util" @@ -62,6 +63,21 @@ func (o *PushOptions) GetCompress() bool { return *o.Compress } +// WithCompressionFormat set field CompressionFormat to given value +func (o *PushOptions) WithCompressionFormat(value string) *PushOptions { + o.CompressionFormat = &value + return o +} + +// GetCompressionFormat returns value of field CompressionFormat +func (o *PushOptions) GetCompressionFormat() string { + if o.CompressionFormat == nil { + var z string + return z + } + return *o.CompressionFormat +} + // WithFormat set field Format to given value func (o *PushOptions) WithFormat(value string) *PushOptions { o.Format = &value @@ -92,6 +108,21 @@ func (o *PushOptions) GetPassword() string { return *o.Password } +// WithProgressWriter set field ProgressWriter to given value +func (o *PushOptions) WithProgressWriter(value io.Writer) *PushOptions { + o.ProgressWriter = &value + return o +} + +// GetProgressWriter returns value of field ProgressWriter +func (o *PushOptions) GetProgressWriter() io.Writer { + if o.ProgressWriter == nil { + var z io.Writer + return z + } + return *o.ProgressWriter +} + // WithSkipTLSVerify set field SkipTLSVerify to given value func (o *PushOptions) WithSkipTLSVerify(value bool) *PushOptions { o.SkipTLSVerify = &value @@ -107,6 +138,21 @@ func (o *PushOptions) GetSkipTLSVerify() bool { return *o.SkipTLSVerify } +// WithRemoveSignatures set field RemoveSignatures to given value +func (o *PushOptions) WithRemoveSignatures(value bool) *PushOptions { + o.RemoveSignatures = &value + return o +} + +// GetRemoveSignatures returns value of 
field RemoveSignatures +func (o *PushOptions) GetRemoveSignatures() bool { + if o.RemoveSignatures == nil { + var z bool + return z + } + return *o.RemoveSignatures +} + // WithUsername set field Username to given value func (o *PushOptions) WithUsername(value string) *PushOptions { o.Username = &value @@ -121,3 +167,18 @@ func (o *PushOptions) GetUsername() string { } return *o.Username } + +// WithQuiet set field Quiet to given value +func (o *PushOptions) WithQuiet(value bool) *PushOptions { + o.Quiet = &value + return o +} + +// GetQuiet returns value of field Quiet +func (o *PushOptions) GetQuiet() bool { + if o.Quiet == nil { + var z bool + return z + } + return *o.Quiet +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_remove_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_remove_options.go index 613a33183de..8972ac93c78 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_remove_options.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_remove_options.go @@ -61,3 +61,33 @@ func (o *RemoveOptions) GetIgnore() bool { } return *o.Ignore } + +// WithLookupManifest set field LookupManifest to given value +func (o *RemoveOptions) WithLookupManifest(value bool) *RemoveOptions { + o.LookupManifest = &value + return o +} + +// GetLookupManifest returns value of field LookupManifest +func (o *RemoveOptions) GetLookupManifest() bool { + if o.LookupManifest == nil { + var z bool + return z + } + return *o.LookupManifest +} + +// WithNoPrune set field NoPrune to given value +func (o *RemoveOptions) WithNoPrune(value bool) *RemoveOptions { + o.NoPrune = &value + return o +} + +// GetNoPrune returns value of field NoPrune +func (o *RemoveOptions) GetNoPrune() bool { + if o.NoPrune == nil { + var z bool + return z + } + return *o.NoPrune +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_scp_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_scp_options.go new file mode 100644 index 00000000000..5a1178cb170 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_scp_options.go @@ -0,0 +1,12 @@ +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// ToParams formats struct fields to be passed to API service +func (o *ScpOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/internal/util/util.go b/vendor/github.com/containers/podman/v4/pkg/bindings/internal/util/util.go index f8f99d6c188..52ce1473861 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/internal/util/util.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/internal/util/util.go @@ -74,6 +74,9 @@ func ToParams(o interface{}) (url.Values, error) { } paramName := fieldName if pn, ok := sType.Field(i).Tag.Lookup("schema"); ok { + if pn == "-" { + continue + } paramName = pn } switch { diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go index 01c3c248d1d..a84658826b5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go @@ -1,25 +1,30 @@ package secrets -//go:generate go run ../generator/generator.go ListOptions // ListOptions are optional options for inspecting secrets +// +//go:generate 
go run ../generator/generator.go ListOptions type ListOptions struct { Filters map[string][]string } -//go:generate go run ../generator/generator.go InspectOptions // InspectOptions are optional options for inspecting secrets +// +//go:generate go run ../generator/generator.go InspectOptions type InspectOptions struct { } -//go:generate go run ../generator/generator.go RemoveOptions // RemoveOptions are optional options for removing secrets +// +//go:generate go run ../generator/generator.go RemoveOptions type RemoveOptions struct { } -//go:generate go run ../generator/generator.go CreateOptions // CreateOptions are optional options for Creating secrets +// +//go:generate go run ../generator/generator.go CreateOptions type CreateOptions struct { Name *string Driver *string DriverOpts map[string]string + Labels map[string]string } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go index 6b1666a427b..c9c88e1f343 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go @@ -61,3 +61,18 @@ func (o *CreateOptions) GetDriverOpts() map[string]string { } return o.DriverOpts } + +// WithLabels set field Labels to given value +func (o *CreateOptions) WithLabels(value map[string]string) *CreateOptions { + o.Labels = value + return o +} + +// GetLabels returns value of field Labels +func (o *CreateOptions) GetLabels() map[string]string { + if o.Labels == nil { + var z map[string]string + return z + } + return o.Labels +} diff --git a/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go b/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go index 0ccca5b6ee0..7d4e67896fc 100644 --- a/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go +++ b/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go @@ -3,13 +3,14 @@ package copy import ( "encoding/base64" "encoding/json" + "errors" + "fmt" "net/http" "os" "path/filepath" "strings" "github.com/containers/podman/v4/libpod/define" - "github.com/pkg/errors" ) // XDockerContainerPathStatHeader is the *key* in http headers pointing to the @@ -18,7 +19,7 @@ const XDockerContainerPathStatHeader = "X-Docker-Container-Path-Stat" // ErrENOENT mimics the stdlib's ErrENOENT and can be used to implement custom logic // while preserving the user-visible error message. -var ErrENOENT = errors.New("No such file or directory") +var ErrENOENT = errors.New("no such file or directory") // FileInfo describes a file or directory and is returned by // (*CopyItem).Stat(). 
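The new `Labels` field on `secrets.CreateOptions` above follows the same generated `With*`/`Get*` builder pattern. A hedged sketch of how a client might use it — it assumes the pre-existing `secrets.Create(ctx, reader, options)` signature is unchanged by this bump, and all names and values are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/secrets"
)

func main() {
	// Placeholder socket URI, as in the pull example.
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}

	opts := new(secrets.CreateOptions).
		WithName("db-password").
		WithDriver("file").
		WithLabels(map[string]string{"env": "ci", "owner": "aro"}) // new in this bump

	report, err := secrets.Create(ctx, strings.NewReader("s3cr3t"), opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("created secret", report.ID)
}
```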
@@ -29,7 +30,7 @@ type FileInfo = define.FileInfo func EncodeFileInfo(info *FileInfo) (string, error) { buf, err := json.Marshal(&info) if err != nil { - return "", errors.Wrap(err, "failed to serialize file stats") + return "", fmt.Errorf("failed to serialize file stats: %w", err) } return base64.URLEncoding.EncodeToString(buf), nil } diff --git a/vendor/github.com/containers/podman/v4/pkg/copy/parse.go b/vendor/github.com/containers/podman/v4/pkg/copy/parse.go index 93edec5fa6a..50f1d211d8f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/copy/parse.go +++ b/vendor/github.com/containers/podman/v4/pkg/copy/parse.go @@ -1,9 +1,8 @@ package copy import ( + "fmt" "strings" - - "github.com/pkg/errors" ) // ParseSourceAndDestination parses the source and destination input into a @@ -19,7 +18,7 @@ func ParseSourceAndDestination(source, destination string) (string, string, stri destContainer, destPath := parseUserInput(destination) if len(sourcePath) == 0 || len(destPath) == 0 { - return "", "", "", "", errors.Errorf("invalid arguments %q, %q: you must specify paths", source, destination) + return "", "", "", "", fmt.Errorf("invalid arguments %q, %q: you must specify paths", source, destination) } return sourceContainer, sourcePath, destContainer, destPath, nil diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/apply.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/apply.go new file mode 100644 index 00000000000..e6d0775e497 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/apply.go @@ -0,0 +1,21 @@ +package entities + +var ( + TypePVC = "PersistentVolumeClaim" + TypePod = "Pod" + TypeService = "Service" +) + +// ApplyOptions controls the deployment of kube yaml files to a Kubernetes Cluster +type ApplyOptions struct { + // Kubeconfig - path to the cluster's kubeconfig file. + Kubeconfig string + // Namespace - namespace to deploy the workload in on the cluster. + Namespace string + // CACertFile - the path to the CA cert file for the Kubernetes cluster. + CACertFile string + // File - the path to the Kubernetes yaml to deploy. + File string + // Service - creates a service for the container being deployed. 
+ Service bool +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go index a5562e7c9ab..519d2b7da71 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go @@ -1,13 +1,13 @@ package entities import ( + "errors" "sort" "strings" "time" "github.com/containers/common/libnetwork/types" "github.com/containers/podman/v4/pkg/ps/define" - "github.com/pkg/errors" ) // ListContainer describes a container suitable for listing @@ -166,7 +166,7 @@ func SortPsOutput(sortBy string, psOutput SortListContainers) (SortListContainer case "pod": sort.Sort(psSortedPod{psOutput}) default: - return nil, errors.Errorf("invalid option for --sort, options are: command, created, id, image, names, runningfor, size, or status") + return nil, errors.New("invalid option for --sort, options are: command, created, id, image, names, runningfor, size, or status") } return psOutput, nil } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go index 1db8b9951d4..940e6b0889b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go @@ -47,17 +47,16 @@ type ContainerRunlabelOptions struct { } // ContainerRunlabelReport contains the results from executing container-runlabel. -type ContainerRunlabelReport struct { -} +type ContainerRunlabelReport struct{} type WaitOptions struct { Condition []define.ContainerStatus Interval time.Duration + Ignore bool Latest bool } type WaitReport struct { - Id string //nolint Error error ExitCode int32 } @@ -72,15 +71,19 @@ type StringSliceReport struct { } type PauseUnPauseOptions struct { - All bool + Filters map[string][]string + All bool + Latest bool } type PauseUnpauseReport struct { - Err error - Id string //nolint + Err error + Id string //nolint:revive,stylecheck + RawInput string } type StopOptions struct { + Filters map[string][]string All bool Ignore bool Latest bool @@ -89,7 +92,7 @@ type StopOptions struct { type StopReport struct { Err error - Id string //nolint + Id string //nolint:revive,stylecheck RawInput string } @@ -111,11 +114,12 @@ type KillOptions struct { type KillReport struct { Err error - Id string //nolint + Id string //nolint:revive,stylecheck RawInput string } type RestartOptions struct { + Filters map[string][]string All bool Latest bool Running bool @@ -123,11 +127,13 @@ type RestartOptions struct { } type RestartReport struct { - Err error - Id string //nolint + Err error + Id string //nolint:revive,stylecheck + RawInput string } type RmOptions struct { + Filters map[string][]string All bool Depend bool Force bool @@ -165,14 +171,17 @@ type CopyOptions struct { Chown bool // Map to translate path names. 
Rename map[string]string + // NoOverwriteDirNonDir when true prevents an existing directory or file from being overwritten + // by the other type + NoOverwriteDirNonDir bool } type CommitReport struct { - Id string //nolint + Id string //nolint:revive,stylecheck } type ContainerExportOptions struct { - Output string + Output io.Writer } type CheckpointOptions struct { @@ -194,7 +203,8 @@ type CheckpointOptions struct { type CheckpointReport struct { Err error `json:"-"` - Id string `json:"Id` //nolint + Id string `json:"Id"` //nolint:revive,stylecheck + RawInput string `json:"-"` RuntimeDuration int64 `json:"runtime_checkpoint_duration"` CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"` } @@ -220,13 +230,14 @@ type RestoreOptions struct { type RestoreReport struct { Err error `json:"-"` - Id string `json:"Id` //nolint + Id string `json:"Id"` //nolint:revive,stylecheck + RawInput string `json:"-"` RuntimeDuration int64 `json:"runtime_restore_duration"` CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"` } type ContainerCreateReport struct { - Id string //nolint + Id string //nolint:revive,stylecheck } // AttachOptions describes the cli and other values @@ -305,7 +316,7 @@ type ContainerStartOptions struct { // ContainerStartReport describes the response from starting // containers from the cli type ContainerStartReport struct { - Id string //nolint + Id string //nolint:revive,stylecheck RawInput string Err error ExitCode int @@ -349,7 +360,7 @@ type ContainerRunOptions struct { // a container type ContainerRunReport struct { ExitCode int - Id string //nolint + Id string //nolint:revive,stylecheck } // ContainerCleanupOptions are the CLI values for the @@ -366,7 +377,8 @@ type ContainerCleanupOptions struct { // container cleanup type ContainerCleanupReport struct { CleanErr error - Id string //nolint + Id string //nolint:revive,stylecheck + RawInput string RmErr error RmiErr error } @@ -381,8 +393,9 @@ type ContainerInitOptions struct { // ContainerInitReport describes the results of a // container init type ContainerInitReport struct { - Err error - Id string //nolint + Err error + Id string //nolint:revive,stylecheck + RawInput string } // ContainerMountOptions describes the input values for mounting containers @@ -404,7 +417,7 @@ type ContainerUnmountOptions struct { // ContainerMountReport describes the response from container mount type ContainerMountReport struct { Err error - Id string //nolint + Id string //nolint:revive,stylecheck Name string Path string } @@ -412,7 +425,7 @@ type ContainerMountReport struct { // ContainerUnmountReport describes the response from umounting a container type ContainerUnmountReport struct { Err error - Id string //nolint + Id string //nolint:revive,stylecheck } // ContainerPruneOptions describes the options needed @@ -431,7 +444,7 @@ type ContainerPortOptions struct { // ContainerPortReport describes the output needed for // the CLI to output ports type ContainerPortReport struct { - Id string //nolint + Id string //nolint:revive,stylecheck Ports []nettypes.PortMapping } @@ -441,6 +454,9 @@ type ContainerCpOptions struct { Pause bool // Extract the tarfile into the destination directory. Extract bool + // OverwriteDirNonDir allows for overwriting a directory with a + // non-directory and vice versa. 
+ OverwriteDirNonDir bool } // ContainerStatsOptions describes input options for getting @@ -479,3 +495,9 @@ type ContainerCloneOptions struct { Run bool Force bool } + +// ContainerUpdateOptions containers options for updating an existing containers cgroup configuration +type ContainerUpdateOptions struct { + NameOrID string + Specgen *specgen.SpecGenerator +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go index 32faa74afd8..aa5ad69260e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go @@ -30,26 +30,30 @@ func (m EngineMode) String() string { // PodmanConfig combines the defaults and settings from the file system with the // flags given in os.Args. Some runtime state is also stored here. type PodmanConfig struct { - *config.Config *pflag.FlagSet - CgroupUsage string // rootless code determines Usage message - ConmonPath string // --conmon flag will set Engine.ConmonPath - CPUProfile string // Hidden: Should CPU profile be taken - EngineMode EngineMode // ABI or Tunneling mode - Identity string // ssh identity for connecting to server - MaxWorks int // maximum number of parallel threads - MemoryProfile string // Hidden: Should memory profile be taken - NoOut bool // Don't output to stdout - RegistriesConf string // allows for specifying a custom registries.conf - Remote bool // Connection to Podman API Service will use RESTful API - RuntimePath string // --runtime flag will set Engine.RuntimePath - RuntimeFlags []string // global flags for the container runtime - Syslog bool // write to StdOut and Syslog, not supported when tunneling - Trace bool // Hidden: Trace execution - URI string // URI to RESTful API Service - - Runroot string - StorageDriver string - StorageOpts []string + ContainersConf *config.Config + ContainersConfDefaultsRO *config.Config // The read-only! defaults from containers.conf. 
+ DockerConfig string // Used for Docker compatibility + CgroupUsage string // rootless code determines Usage message + ConmonPath string // --conmon flag will set Engine.ConmonPath + CPUProfile string // Hidden: Should CPU profile be taken + EngineMode EngineMode // ABI or Tunneling mode + Identity string // ssh identity for connecting to server + MaxWorks int // maximum number of parallel threads + MemoryProfile string // Hidden: Should memory profile be taken + RegistriesConf string // allows for specifying a custom registries.conf + Remote bool // Connection to Podman API Service will use RESTful API + RuntimePath string // --runtime flag will set Engine.RuntimePath + RuntimeFlags []string // global flags for the container runtime + Syslog bool // write to StdOut and Syslog, not supported when tunneling + Trace bool // Hidden: Trace execution + URI string // URI to RESTful API Service + + Runroot string + StorageDriver string + StorageOpts []string + SSHMode string + MachineMode bool + TransientStore bool } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go index 6b70a34524a..06a6372fbfd 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go @@ -13,7 +13,7 @@ import ( type ContainerCopyFunc func() error -type ContainerEngine interface { +type ContainerEngine interface { //nolint:interfacebloat AutoUpdate(ctx context.Context, options AutoUpdateOptions) ([]*AutoUpdateReport, []error) Config(ctx context.Context) (*config.Config, error) ContainerAttach(ctx context.Context, nameOrID string, options AttachOptions) error @@ -51,16 +51,20 @@ type ContainerEngine interface { ContainerTop(ctx context.Context, options TopOptions) (*StringSliceReport, error) ContainerUnmount(ctx context.Context, nameOrIDs []string, options ContainerUnmountOptions) ([]*ContainerUnmountReport, error) ContainerUnpause(ctx context.Context, namesOrIds []string, options PauseUnPauseOptions) ([]*PauseUnpauseReport, error) + ContainerUpdate(ctx context.Context, options *ContainerUpdateOptions) (string, error) ContainerWait(ctx context.Context, namesOrIds []string, options WaitOptions) ([]WaitReport, error) Diff(ctx context.Context, namesOrIds []string, options DiffOptions) (*DiffReport, error) Events(ctx context.Context, opts EventsOptions) error + GenerateSpec(ctx context.Context, opts *GenerateSpecOptions) (*GenerateSpecReport, error) GenerateSystemd(ctx context.Context, nameOrID string, opts GenerateSystemdOptions) (*GenerateSystemdReport, error) GenerateKube(ctx context.Context, nameOrIDs []string, opts GenerateKubeOptions) (*GenerateKubeReport, error) SystemPrune(ctx context.Context, options SystemPruneOptions) (*SystemPruneReport, error) HealthCheckRun(ctx context.Context, nameOrID string, options HealthCheckOptions) (*define.HealthCheckResults, error) Info(ctx context.Context) (*define.Info, error) + KubeApply(ctx context.Context, body io.Reader, opts ApplyOptions) error NetworkConnect(ctx context.Context, networkname string, options NetworkConnectOptions) error - NetworkCreate(ctx context.Context, network types.Network) (*types.Network, error) + NetworkCreate(ctx context.Context, network types.Network, createOptions *types.NetworkCreateOptions) (*types.Network, error) + NetworkUpdate(ctx context.Context, networkname string, options NetworkUpdateOptions) error 
NetworkDisconnect(ctx context.Context, networkname string, options NetworkDisconnectOptions) error NetworkExists(ctx context.Context, networkname string) (*BoolReport, error) NetworkInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]types.Network, []error, error) @@ -71,8 +75,9 @@ type ContainerEngine interface { PlayKube(ctx context.Context, body io.Reader, opts PlayKubeOptions) (*PlayKubeReport, error) PlayKubeDown(ctx context.Context, body io.Reader, opts PlayKubeDownOptions) (*PlayKubeReport, error) PodCreate(ctx context.Context, specg PodSpec) (*PodCreateReport, error) + PodClone(ctx context.Context, podClone PodCloneOptions) (*PodCloneReport, error) PodExists(ctx context.Context, nameOrID string) (*BoolReport, error) - PodInspect(ctx context.Context, options PodInspectOptions) (*PodInspectReport, error) + PodInspect(ctx context.Context, namesOrID []string, options InspectOptions) ([]*PodInspectReport, []error, error) PodKill(ctx context.Context, namesOrIds []string, options PodKillOptions) ([]*PodKillReport, error) PodLogs(ctx context.Context, pod string, options PodLogsOptions) error PodPause(ctx context.Context, namesOrIds []string, options PodPauseOptions) ([]*PodPauseReport, error) @@ -103,4 +108,5 @@ type ContainerEngine interface { VolumePrune(ctx context.Context, options VolumePruneOptions) ([]*reports.PruneReport, error) VolumeRm(ctx context.Context, namesOrIds []string, opts VolumeRmOptions) ([]*VolumeRmReport, error) VolumeUnmount(ctx context.Context, namesOrIds []string) ([]*VolumeUnmountReport, error) + VolumeReload(ctx context.Context) (*VolumeReloadReport, error) } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go index 5011d82aa79..0de0a889cab 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go @@ -4,10 +4,11 @@ import ( "context" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/ssh" "github.com/containers/podman/v4/pkg/domain/entities/reports" ) -type ImageEngine interface { +type ImageEngine interface { //nolint:interfacebloat Build(ctx context.Context, containerFiles []string, opts BuildOptions) (*BuildReport, error) Config(ctx context.Context) (*config.Config, error) Exists(ctx context.Context, nameOrID string) (*BoolReport, error) @@ -22,18 +23,18 @@ type ImageEngine interface { Push(ctx context.Context, source string, destination string, opts ImagePushOptions) error Remove(ctx context.Context, images []string, opts ImageRemoveOptions) (*ImageRemoveReport, []error) Save(ctx context.Context, nameOrID string, tags []string, options ImageSaveOptions) error + Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool, sshMode ssh.EngineMode) error Search(ctx context.Context, term string, opts ImageSearchOptions) ([]ImageSearchReport, error) SetTrust(ctx context.Context, args []string, options SetTrustOptions) error ShowTrust(ctx context.Context, args []string, options ShowTrustOptions) (*ShowTrustReport, error) Shutdown(ctx context.Context) Tag(ctx context.Context, nameOrID string, tags []string, options ImageTagOptions) error - Transfer(ctx context.Context, source ImageScpOptions, dest ImageScpOptions, parentFlags []string) error Tree(ctx context.Context, nameOrID string, options ImageTreeOptions) (*ImageTreeReport, error) Unmount(ctx context.Context, images []string, 
options ImageUnmountOptions) ([]*ImageUnmountReport, error) Untag(ctx context.Context, nameOrID string, tags []string, options ImageUntagOptions) error ManifestCreate(ctx context.Context, name string, images []string, opts ManifestCreateOptions) (string, error) ManifestExists(ctx context.Context, name string) (*BoolReport, error) - ManifestInspect(ctx context.Context, name string) ([]byte, error) + ManifestInspect(ctx context.Context, name string, opts ManifestInspectOptions) ([]byte, error) ManifestAdd(ctx context.Context, listName string, imageNames []string, opts ManifestAddOptions) (string, error) ManifestAnnotate(ctx context.Context, names, image string, opts ManifestAnnotateOptions) (string, error) ManifestRemoveDigest(ctx context.Context, names, image string) (string, error) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go index d8ba0f1d3ea..34a6fe04890 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go @@ -14,6 +14,7 @@ type Event struct { // TODO: it would be nice to have full control over the types at some // point and fork such Docker types. dockerEvents.Message + HealthStatus string `json:",omitempty"` } // ConvertToLibpodEvent converts an entities event to a libpod one. @@ -33,6 +34,7 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event { image := e.Actor.Attributes["image"] name := e.Actor.Attributes["name"] details := e.Actor.Attributes + podID := e.Actor.Attributes["podId"] delete(details, "image") delete(details, "name") delete(details, "containerExitCode") @@ -44,7 +46,9 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event { Status: status, Time: time.Unix(0, e.TimeNano), Type: t, + HealthStatus: e.HealthStatus, Details: libpodEvents.Details{ + PodID: podID, Attributes: details, }, } @@ -59,7 +63,8 @@ func ConvertToEntitiesEvent(e libpodEvents.Event) *Event { attributes["image"] = e.Image attributes["name"] = e.Name attributes["containerExitCode"] = strconv.Itoa(e.ContainerExitCode) - return &Event{dockerEvents.Message{ + attributes["podId"] = e.PodID + message := dockerEvents.Message{ // Compatibility with clients that still look for deprecated API elements Status: e.Status.String(), ID: e.ID, @@ -73,5 +78,9 @@ func ConvertToEntitiesEvent(e libpodEvents.Event) *Event { Scope: "local", Time: e.Time.Unix(), TimeNano: e.Time.UnixNano(), - }} + } + return &Event{ + message, + e.HealthStatus, + } } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go index 73dd64ecd02..314996497a2 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go @@ -4,34 +4,21 @@ import "io" // GenerateSystemdOptions control the generation of systemd unit files. type GenerateSystemdOptions struct { - // Name - use container/pod name instead of its ID. - Name bool - // New - create a new container instead of starting a new one. - New bool - // RestartPolicy - systemd restart policy. - RestartPolicy *string - // RestartSec - systemd service restartsec. Configures the time to sleep before restarting a service. - RestartSec *uint - // StartTimeout - time when starting the container. - StartTimeout *uint - // StopTimeout - time when stopping the container. 
- StopTimeout *uint - // ContainerPrefix - systemd unit name prefix for containers - ContainerPrefix string - // PodPrefix - systemd unit name prefix for pods - PodPrefix string - // Separator - systemd unit name separator between name/id and prefix - Separator string - // NoHeader - skip header generation - NoHeader bool - // TemplateUnitFile - make use of %i and %I to differentiate between the different instances of the unit - TemplateUnitFile bool - // Wants - systemd wants list for the container or pods - Wants []string - // After - systemd after list for the container or pods - After []string - // Requires - systemd requires list for the container or pods - Requires []string + Name bool + New bool + RestartPolicy *string + RestartSec *uint + StartTimeout *uint + StopTimeout *uint + ContainerPrefix string + PodPrefix string + Separator string + NoHeader bool + TemplateUnitFile bool + Wants []string + After []string + Requires []string + AdditionalEnvVariables []string } // GenerateSystemdReport @@ -46,6 +33,8 @@ type GenerateKubeOptions struct { Service bool } +type KubeGenerateOptions = GenerateKubeOptions + // GenerateKubeReport // // FIXME: Podman4.0 should change io.Reader to io.ReaderCloser @@ -53,3 +42,14 @@ type GenerateKubeReport struct { // Reader - the io.Reader to reader the generated YAML file. Reader io.Reader } + +type GenerateSpecReport struct { + Data []byte +} + +type GenerateSpecOptions struct { + ID string + FileName string + Compact bool + Name bool +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go index 7081c5d25a5..46c4a22e95d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go @@ -1,12 +1,15 @@ package entities import ( + "io" "net/url" "time" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature/signer" "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" "github.com/containers/podman/v4/pkg/inspect" "github.com/containers/podman/v4/pkg/trust" "github.com/docker/docker/api/types/container" @@ -46,14 +49,14 @@ type Image struct { HealthCheck *manifest.Schema2HealthConfig `json:",omitempty"` } -func (i *Image) Id() string { // nolint +func (i *Image) Id() string { //nolint:revive,stylecheck return i.ID } // swagger:model LibpodImageSummary type ImageSummary struct { ID string `json:"Id"` - ParentId string // nolint + ParentId string //nolint:revive,stylecheck RepoTags []string RepoDigests []string Created int64 @@ -66,13 +69,12 @@ type ImageSummary struct { Dangling bool `json:",omitempty"` // Podman extensions - Names []string `json:",omitempty"` - Digest string `json:",omitempty"` - ConfigDigest string `json:",omitempty"` - History []string `json:",omitempty"` + Names []string `json:",omitempty"` + Digest string `json:",omitempty"` + History []string `json:",omitempty"` } -func (i *ImageSummary) Id() string { // nolint +func (i *ImageSummary) Id() string { //nolint:revive,stylecheck return i.ID } @@ -94,6 +96,8 @@ type ImageRemoveOptions struct { Ignore bool // Confirms if given name is a manifest list and removes it, otherwise returns error. 
LookupManifest bool + // NoPrune will not remove dangling images + NoPrune bool } // ImageRemoveReport is the response for removing one or more image(s) from storage @@ -154,6 +158,11 @@ type ImagePullOptions struct { SkipTLSVerify types.OptionalBool // PullPolicy whether to pull new image PullPolicy config.PullPolicy + // Writer is used to display copy information including progress bars. + Writer io.Writer + // OciDecryptConfig, if non-nil, contains the config that can be used to decrypt + // an image if it is encrypted. If nil, no attempt is made to decrypt an image. + OciDecryptConfig *encconfig.DecryptConfig } // ImagePullReport is the response from pulling one or more images. @@ -193,8 +202,7 @@ type ImagePushOptions struct { // image. Default is manifest type of source, with fallbacks. // Ignored for remote calls. Format string - // Quiet can be specified to suppress pull progress when pulling. Ignored - // for remote calls. + // Quiet can be specified to suppress push progress when pushing. Quiet bool // Rm indicates whether to remove the manifest list if push succeeds Rm bool @@ -203,15 +211,49 @@ RemoveSignatures bool // SignaturePolicy to use when pulling. Ignored for remote calls. SignaturePolicy string + // Signers, if non-empty, asks for signatures to be added during the copy + // using the provided signers. + // Rejected for remote calls. + Signers []*signer.Signer // SignBy adds a signature at the destination using the specified key. // Ignored for remote calls. SignBy string + // SignPassphrase, if non-empty, specifies a passphrase to use when signing + // with the key ID from SignBy. + SignPassphrase string + // SignBySigstorePrivateKeyFile, if non-empty, asks for a signature to be added + // during the copy, using a sigstore private key file at the provided path. + // Ignored for remote calls. + SignBySigstorePrivateKeyFile string + // SignSigstorePrivateKeyPassphrase is the passphrase to use when signing with + // SignBySigstorePrivateKeyFile. + SignSigstorePrivateKeyPassphrase []byte // SkipTLSVerify to skip HTTPS and certificate verification. SkipTLSVerify types.OptionalBool // Progress to get progress notifications Progress chan types.ProgressProperties // CompressionFormat is the format to use for the compression of the blobs CompressionFormat string + // Writer is used to display copy information including progress bars. + Writer io.Writer + // OciEncryptConfig when non-nil indicates that an image should be encrypted. + // The encryption options are derived from the construction of the EncryptConfig object. + OciEncryptConfig *encconfig.EncryptConfig + // OciEncryptLayers represents the list of layers to encrypt. + // If nil, don't encrypt any layers. + // If non-nil and len==0, encrypt all layers. + // Integers in the slice represent 0-indexed layer indices, with support for negative + // indexing, i.e. 0 is the first layer, -1 is the last (top-most) layer. + OciEncryptLayers *[]int +} + +// ImagePushReport is the response from pushing an image. +// Currently only used in the remote API. +type ImagePushReport struct { + // Stream used to provide push progress + Stream string `json:"stream,omitempty"` + // Error contains text of errors from pushing + Error string `json:"error,omitempty"` } // ImageSearchOptions are the arguments for searching images.
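The new push fields wire progress output and sigstore signing through ImagePushOptions. A sketch of filling in the new fields follows; the key path and passphrase are placeholders, and actually pushing still requires handing the options to the image engine:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/types"
	"github.com/containers/podman/v4/pkg/domain/entities"
)

func main() {
	opts := entities.ImagePushOptions{
		// Copy details and progress bars go to the supplied writer.
		Writer: os.Stderr,
		// Sigstore signing via the new fields; paths/secrets are placeholders.
		SignBySigstorePrivateKeyFile:     "/path/to/key.private",
		SignSigstorePrivateKeyPassphrase: []byte("secret"),
		// Explicitly require TLS verification (the zero value is "undefined").
		SkipTLSVerify: types.OptionalBoolFalse,
	}
	fmt.Printf("signing with key file %q\n", opts.SignBySigstorePrivateKeyFile)
}
```
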
@@ -291,7 +333,7 @@ type ImageImportOptions struct { } type ImageImportReport struct { - Id string // nolint + Id string //nolint:revive,stylecheck } // ImageSaveOptions provides options for saving images. @@ -311,7 +353,8 @@ type ImageSaveOptions struct { // Output - write image to the specified path. Output string // Quiet - suppress output when copying images - Quiet bool + Quiet bool + SignaturePolicy string } // ImageScpOptions provides options for securely copying images to and from a remote host @@ -326,6 +369,8 @@ type ImageScpOptions struct { Image string `json:"image,omitempty"` // User is used in conjunction with Transfer to determine if a valid user was given to save from/load into User string `json:"user,omitempty"` + // Tag is the name to be used for the image on the destination + Tag string `json:"tag,omitempty"` } // ImageScpConnections provides the ssh related information used in remote image transfer @@ -398,8 +443,7 @@ type ImageUnmountOptions struct { // ImageMountReport describes the response from image mount type ImageMountReport struct { - Err error - Id string // nolint + Id string //nolint:revive,stylecheck Name string Repositories []string Path string @@ -408,5 +452,5 @@ type ImageMountReport struct { // ImageUnmountReport describes the response from unmounting an image type ImageUnmountReport struct { Err error - Id string // nolint + Id string //nolint:revive,stylecheck } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/machine.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/machine.go new file mode 100644 index 00000000000..4fd0413c9b0 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/machine.go @@ -0,0 +1,40 @@ +package entities + +import "github.com/containers/podman/v4/libpod/define" + +type ListReporter struct { + Name string + Default bool + Created string + Running bool + Starting bool + LastUp string + Stream string + VMType string + CPUs uint64 + Memory string + DiskSize string + Port int + RemoteUsername string + IdentityPath string +} + +// MachineInfo contains info on the machine host and version info +type MachineInfo struct { + Host *MachineHostInfo `json:"Host"` + Version define.Version `json:"Version"` +} + +// MachineHostInfo contains info on the machine host +type MachineHostInfo struct { + Arch string `json:"Arch"` + CurrentMachine string `json:"CurrentMachine"` + DefaultMachine string `json:"DefaultMachine"` + EventsDir string `json:"EventsDir"` + MachineConfigDir string `json:"MachineConfigDir"` + MachineImageDir string `json:"MachineImageDir"` + MachineState string `json:"MachineState"` + NumberOfMachines int `json:"NumberOfMachines"` + OS string `json:"OS"` + VMType string `json:"VMType"` +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go index 81f3e837bb8..030dc4b6d1b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go @@ -4,7 +4,18 @@ import "github.com/containers/image/v5/types" // ManifestCreateOptions provides model for creating manifest type ManifestCreateOptions struct { + // True when adding lists to include all images All bool `schema:"all"` + // Amend an extant list if there's already one with the desired name + Amend bool `schema:"amend"` + // Should TLS registry certificate be verified?
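The new machine.go types are plain data carriers, so `podman machine info`-style output is just JSON marshalling of them. A small sketch with made-up host values, assuming the vendored entities package is importable:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/podman/v4/pkg/domain/entities"
)

func main() {
	host := entities.MachineHostInfo{
		Arch:             "x86_64",
		CurrentMachine:   "podman-machine-default",
		DefaultMachine:   "podman-machine-default",
		MachineState:     "Running",
		NumberOfMachines: 1,
		OS:               "linux",
		VMType:           "qemu",
	}
	out, err := json.MarshalIndent(host, "", "  ")
	if err != nil {
		panic(err)
	}
	// The struct tags above control the key casing in this output.
	fmt.Println(string(out))
}
```
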
+ SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` +} + +// ManifestInspectOptions provides model for inspecting manifest +type ManifestInspectOptions struct { + // Should TLS registry certificate be verified? + SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` } // ManifestAddOptions provides model for adding digests to manifest list @@ -32,6 +43,8 @@ type ManifestAnnotateOptions struct { // Annotation to add to manifest list Annotation []string `json:"annotation" schema:"annotation"` + // Annotations to add to manifest list via a map, which is preferred over Annotation + Annotations map[string]string `json:"annotations" schema:"annotations"` // Arch overrides the architecture for the image Arch string `json:"arch" schema:"arch"` // Feature list for the image @@ -61,12 +74,39 @@ type ManifestModifyOptions struct { ManifestRemoveOptions } +// ManifestPushReport provides the model for the pushed manifest +// +// swagger:model +type ManifestPushReport struct { + // ID of the pushed manifest + ID string `json:"Id"` + // Stream used to provide push progress + Stream string `json:"stream,omitempty"` + // Error contains text of errors from pushing + Error string `json:"error,omitempty"` +} + // ManifestRemoveOptions provides the model for removing digests from a manifest // // swagger:model type ManifestRemoveOptions struct { } +// ManifestRemoveReport provides the model for the removed manifest +// +// swagger:model +type ManifestRemoveReport struct { + // Deleted manifest list. + Deleted []string `json:",omitempty"` + // Untagged images. Can be longer than Deleted. + Untagged []string `json:",omitempty"` + // Errors associated with operation + Errors []string `json:",omitempty"` + // ExitCode describes the exit codes as described in the `podman rmi` + // man page. + ExitCode int +} + // ManifestModifyReport provides the model for removed digests and changed manifest // // swagger:model diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go index 0f901c7f14d..0ac3d5bfa7b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go @@ -22,7 +22,7 @@ type NetworkReloadOptions struct { // NetworkReloadReport describes the results of reloading a container network. type NetworkReloadReport struct { - // nolint:stylecheck,revive + //nolint:stylecheck,revive Id string Err error } @@ -41,17 +41,26 @@ type NetworkRmReport struct { // NetworkCreateOptions describes options to create a network type NetworkCreateOptions struct { - DisableDNS bool - Driver string - Gateways []net.IP - Internal bool - Labels map[string]string - MacVLAN string - Ranges []string - Subnets []string - IPv6 bool + DisableDNS bool + Driver string + Gateways []net.IP + Internal bool + Labels map[string]string + MacVLAN string + NetworkDNSServers []string + Ranges []string + Subnets []string + IPv6 bool // Mapping of driver options and values.
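The new manifest knobs are plain struct options: the Annotations map supersedes the older Annotation string slice, and SkipTLSVerify uses the three-valued OptionalBool from containers/image. A construction sketch with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
	"github.com/containers/podman/v4/pkg/domain/entities"
)

func main() {
	annotate := entities.ManifestAnnotateOptions{
		// The map form is preferred over the legacy []string Annotation field.
		Annotations: map[string]string{
			"org.opencontainers.image.url": "https://example.com",
		},
		Arch: "arm64",
	}
	inspect := entities.ManifestInspectOptions{
		// e.g. a local registry with a self-signed certificate
		SkipTLSVerify: types.OptionalBoolTrue,
	}
	fmt.Printf("%+v %+v\n", annotate, inspect)
}
```
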
Options map[string]string + // IgnoreIfExists if true, do not fail if the network already exists + IgnoreIfExists bool +} + +// NetworkUpdateOptions describes options to update a network +type NetworkUpdateOptions struct { + AddDNSServers []string `json:"adddnsservers"` + RemoveDNSServers []string `json:"removednsservers"` } // NetworkCreateReport describes a created network for the cli @@ -81,8 +90,7 @@ type NetworkPruneReport struct { Error error } -// NetworkPruneOptions describes options for pruning -// unused cni networks +// NetworkPruneOptions describes options for pruning unused networks type NetworkPruneOptions struct { Filters map[string][]string } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go index c9dc3f08c2d..bd14b29680a 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go @@ -54,6 +54,16 @@ type PlayKubeOptions struct { LogOptions []string // Start - don't start the pod if false Start types.OptionalBool + // ServiceContainer - creates a service container that is started before and is stopped after all pods. + ServiceContainer bool + // Userns - define the user namespace to use. + Userns string + // IsRemote - was the request triggered by running podman-remote + IsRemote bool + // Force - remove volumes on --down + Force bool + // PublishPorts - configure how to expose ports configured inside the K8S YAML file + PublishPorts []string } // PlayKubePod represents a single pod and associated containers created by play kube @@ -84,13 +94,24 @@ type PlayKubeReport struct { // Volumes - volumes created by play kube. Volumes []PlayKubeVolume PlayKubeTeardown + Secrets []PlaySecret } +type KubePlayReport = PlayKubeReport + // PlayKubeDownOptions are options for tearing down pods -type PlayKubeDownOptions struct{} +type PlayKubeDownOptions struct { + // Force - remove volumes if passed + Force bool +} // PlayKubeDownReport contains the results of tearing down play kube type PlayKubeTeardown struct { - StopReport []*PodStopReport - RmReport []*PodRmReport + StopReport []*PodStopReport + RmReport []*PodRmReport + VolumeRmReport []*VolumeRmReport +} + +type PlaySecret struct { + CreateReport *SecretCreateReport } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go index cac961cf16f..36676d56d2e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go @@ -20,15 +20,15 @@ type PodKillOptions struct { type PodKillReport struct { Errs []error - Id string // nolint + Id string //nolint:revive,stylecheck } type ListPodsReport struct { Cgroup string Containers []*ListPodContainer Created time.Time - Id string // nolint - InfraId string // nolint + Id string //nolint:revive,stylecheck + InfraId string //nolint:revive,stylecheck Name string Namespace string // Network names connected to infra container @@ -38,7 +38,7 @@ type ListPodsReport struct { } type ListPodContainer struct { - Id string // nolint + Id string //nolint:revive,stylecheck Names string Status string } @@ -50,7 +50,7 @@ type PodPauseOptions struct { type PodPauseReport struct { Errs []error - Id string // nolint + Id string //nolint:revive,stylecheck } type PodunpauseOptions struct { @@ -60,7 +60,7 @@ type PodunpauseOptions struct { type 
PodUnpauseReport struct { Errs []error - Id string // nolint + Id string //nolint:revive,stylecheck } type PodStopOptions struct { @@ -72,7 +72,7 @@ type PodStopOptions struct { type PodStopReport struct { Errs []error - Id string // nolint + Id string //nolint:revive,stylecheck } type PodRestartOptions struct { @@ -82,7 +82,7 @@ type PodRestartReport struct { Errs []error - Id string // nolint + Id string //nolint:revive,stylecheck } type PodStartOptions struct { @@ -92,7 +92,7 @@ type PodStartReport struct { Errs []error - Id string // nolint + Id string //nolint:revive,stylecheck } type PodRmOptions struct { @@ -105,7 +105,7 @@ type PodRmReport struct { Err error - Id string // nolint + Id string //nolint:revive,stylecheck } // PodSpec is an abstracted version of PodSpecGen designed to eventually accept options @@ -122,6 +122,7 @@ type PodCreateOptions struct { CgroupParent string `json:"cgroup_parent,omitempty"` CreateCommand []string `json:"create_command,omitempty"` Devices []string `json:"devices,omitempty"` DeviceReadBPs []string `json:"device_read_bps,omitempty"` + ExitPolicy string `json:"exit_policy,omitempty"` Hostname string `json:"hostname,omitempty"` Infra bool `json:"infra,omitempty"` InfraImage string `json:"infra_image,omitempty"` @@ -153,123 +154,149 @@ type PodLogsOptions struct { Color bool } +// PodCloneOptions contains options for cloning an existing pod +type PodCloneOptions struct { + ID string + Destroy bool + CreateOpts PodCreateOptions + InfraOptions ContainerCreateOptions + PerContainerOptions ContainerCreateOptions + Start bool +} + +type ContainerMode string + +const ( + InfraMode = ContainerMode("infra") + CloneMode = ContainerMode("clone") + UpdateMode = ContainerMode("update") + CreateMode = ContainerMode("create") +) + type ContainerCreateOptions struct { - Annotation []string - Attach []string - Authfile string - BlkIOWeight string - BlkIOWeightDevice []string - CapAdd []string - CapDrop []string - CgroupNS string - CgroupsMode string - CgroupParent string `json:"cgroup_parent,omitempty"` - CIDFile string - ConmonPIDFile string `json:"container_conmon_pidfile,omitempty"` - CPUPeriod uint64 - CPUQuota int64 - CPURTPeriod uint64 - CPURTRuntime int64 - CPUShares uint64 - CPUS float64 `json:"cpus,omitempty"` - CPUSetCPUs string `json:"cpuset_cpus,omitempty"` - CPUSetMems string - Devices []string `json:"devices,omitempty"` - DeviceCgroupRule []string - DeviceReadBPs []string `json:"device_read_bps,omitempty"` - DeviceReadIOPs []string - DeviceWriteBPs []string - DeviceWriteIOPs []string - Entrypoint *string `json:"container_command,omitempty"` - Env []string - EnvHost bool - EnvFile []string - Expose []string - GIDMap []string - GroupAdd []string - HealthCmd string - HealthInterval string - HealthRetries uint - HealthStartPeriod string - HealthTimeout string - Hostname string `json:"hostname,omitempty"` - HTTPProxy bool - HostUsers []string - ImageVolume string - Init bool - InitContainerType string - InitPath string - Interactive bool - IPC string - Label []string - LabelFile []string - LogDriver string - LogOptions []string - Memory string - MemoryReservation string - MemorySwap string - MemorySwappiness int64 - Name string `json:"container_name"` - NoHealthCheck bool - OOMKillDisable bool - OOMScoreAdj *int - Arch string - OS string - Variant string - PID string `json:"pid,omitempty"` - PIDsLimit *int64 - Platform string - Pod string - PodIDFile string - Personality string - PreserveFDs uint - Privileged bool -
PublishAll bool - Pull string - Quiet bool - ReadOnly bool - ReadOnlyTmpFS bool - Restart string - Replace bool - Requires []string - Rm bool - RootFS bool - Secrets []string - SecurityOpt []string `json:"security_opt,omitempty"` - SdNotifyMode string - ShmSize string - SignaturePolicy string - StopSignal string - StopTimeout uint - StorageOpts []string - SubUIDName string - SubGIDName string - Sysctl []string `json:"sysctl,omitempty"` - Systemd string - Timeout uint - TLSVerify commonFlag.OptionalBool - TmpFS []string - TTY bool - Timezone string - Umask string - UnsetEnv []string - UnsetEnvAll bool - UIDMap []string - Ulimit []string - User string - UserNS string `json:"-"` - UTS string - Mount []string - Volume []string `json:"volume,omitempty"` - VolumesFrom []string `json:"volumes_from,omitempty"` - Workdir string - SeccompPolicy string - PidFile string - ChrootDirs []string - IsInfra bool - IsClone bool - - Net *NetOptions `json:"net,omitempty"` + Annotation []string + Attach []string + Authfile string + BlkIOWeight string + BlkIOWeightDevice []string + CapAdd []string + CapDrop []string + CgroupNS string + CgroupsMode string + CgroupParent string `json:"cgroup_parent,omitempty"` + CIDFile string + ConmonPIDFile string `json:"container_conmon_pidfile,omitempty"` + CPUPeriod uint64 + CPUQuota int64 + CPURTPeriod uint64 + CPURTRuntime int64 + CPUShares uint64 + CPUS float64 `json:"cpus,omitempty"` + CPUSetCPUs string `json:"cpuset_cpus,omitempty"` + CPUSetMems string + Devices []string `json:"devices,omitempty"` + DeviceCgroupRule []string + DeviceReadBPs []string `json:"device_read_bps,omitempty"` + DeviceReadIOPs []string + DeviceWriteBPs []string + DeviceWriteIOPs []string + Entrypoint *string `json:"container_command,omitempty"` + Env []string + EnvHost bool + EnvFile []string + Expose []string + GIDMap []string + GroupAdd []string + HealthCmd string + HealthInterval string + HealthRetries uint + HealthStartPeriod string + HealthTimeout string + HealthOnFailure string + Hostname string `json:"hostname,omitempty"` + HTTPProxy bool + HostUsers []string + ImageVolume string + Init bool + InitContainerType string + InitPath string + Interactive bool + IPC string + Label []string + LabelFile []string + LogDriver string + LogOptions []string + Memory string + MemoryReservation string + MemorySwap string + MemorySwappiness int64 + Name string `json:"container_name"` + NoHealthCheck bool + OOMKillDisable bool + OOMScoreAdj *int + Arch string + OS string + Variant string + PID string `json:"pid,omitempty"` + PIDsLimit *int64 + Platform string + Pod string + PodIDFile string + Personality string + PreserveFDs uint + Privileged bool + PublishAll bool + Pull string + Quiet bool + ReadOnly bool + ReadWriteTmpFS bool + Restart string + Replace bool + Requires []string + Rm bool + RootFS bool + Secrets []string + SecurityOpt []string `json:"security_opt,omitempty"` + SdNotifyMode string + ShmSize string + SignaturePolicy string + StartupHCCmd string + StartupHCInterval string + StartupHCRetries uint + StartupHCSuccesses uint + StartupHCTimeout string + StopSignal string + StopTimeout uint + StorageOpts []string + SubUIDName string + SubGIDName string + Sysctl []string `json:"sysctl,omitempty"` + Systemd string + Timeout uint + TLSVerify commonFlag.OptionalBool + TmpFS []string + TTY bool + Timezone string + Umask string + EnvMerge []string + UnsetEnv []string + UnsetEnvAll bool + UIDMap []string + Ulimit []string + User string + UserNS string `json:"-"` + UTS string + Mount []string + Volume 
[]string `json:"volume,omitempty"` + VolumesFrom []string `json:"volumes_from,omitempty"` + Workdir string + SeccompPolicy string + PidFile string + ChrootDirs []string + IsInfra bool + IsClone bool + DecryptionKeys []string + Net *NetOptions `json:"net,omitempty"` CgroupConf []string @@ -286,7 +313,11 @@ func NewInfraContainerCreateOptions() ContainerCreateOptions { } type PodCreateReport struct { - Id string // nolint + Id string //nolint:revive,stylecheck +} + +type PodCloneReport struct { + Id string //nolint:revive,stylecheck } func (p *PodCreateOptions) CPULimits() *specs.LinuxCPU { @@ -319,6 +350,7 @@ func ToPodSpecGen(s specgen.PodSpecGenerator, p *PodCreateOptions) (*specgen.Pod } s.Pid = out s.Hostname = p.Hostname + s.ExitPolicy = p.ExitPolicy s.Labels = p.Labels s.Devices = p.Devices s.SecurityOpt = p.SecurityOpt @@ -387,7 +419,7 @@ type PodPruneOptions struct { type PodPruneReport struct { Err error - Id string // nolint + Id string //nolint:revive,stylecheck } type PodTopOptions struct { @@ -412,15 +444,6 @@ type PodPSOptions struct { Sort string } -type PodInspectOptions struct { - Latest bool - - // Options for the API. - NameOrID string - - Format string -} - type PodInspectReport struct { *define.InspectPodData } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go index 54bcd092baf..6ccbc9a623a 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go @@ -1,8 +1,9 @@ package reports type RmReport struct { - Id string `json:"Id"` //nolint - Err error `json:"Err,omitempty"` + Id string `json:"Id"` //nolint:revive,stylecheck + Err error `json:"Err,omitempty"` + RawInput string `json:"-"` } func RmReportsIds(r []*RmReport) []string { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go index 497e5d6069d..ac3d8e7cedc 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go @@ -1,7 +1,7 @@ package reports type PruneReport struct { - Id string `json:"Id"` //nolint + Id string `json:"Id"` //nolint:revive,stylecheck Err error `json:"Err,omitempty"` Size uint64 `json:"Size"` } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/scp.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/scp.go new file mode 100644 index 00000000000..1e102bab3d7 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/scp.go @@ -0,0 +1,5 @@ +package reports + +type ScpReport struct { + Id string `json:"Id"` //nolint:revive,stylecheck +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go index d8af937a721..5686b90e9ad 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go @@ -13,6 +13,7 @@ type SecretCreateReport struct { type SecretCreateOptions struct { Driver string DriverOpts map[string]string + Labels map[string]string } type SecretListRequest struct { @@ -55,6 +56,7 @@ type SecretVersion struct { type SecretSpec struct { Name string Driver 
SecretDriverSpec + Labels map[string]string } type SecretDriverSpec struct { @@ -70,6 +72,8 @@ type SecretCreateRequest struct { Data string // Driver represents a driver (default "file") Driver SecretDriverSpec + // Labels are labels on the secret + Labels map[string]string } // Secret create response diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go index 21026477d5b..5d7ef92d744 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go @@ -18,9 +18,10 @@ type ServiceOptions struct { // SystemPruneOptions provides options to prune the system. type SystemPruneOptions struct { - All bool - Volume bool - Filters map[string][]string `json:"filters" schema:"filters"` + All bool + Volume bool + Filters map[string][]string `json:"filters" schema:"filters"` + External bool } // SystemPruneReport provides a report after system prune is executed. @@ -28,6 +29,7 @@ type SystemPruneReport struct { PodPruneReport []*PodPruneReport ContainerPruneReports []*reports.PruneReport ImagePruneReports []*reports.PruneReport + NetworkPruneReports []*NetworkPruneReport VolumePruneReports []*reports.PruneReport ReclaimedSpace uint64 } @@ -46,6 +48,7 @@ type SystemDfOptions struct { // SystemDfReport describes the response for df information type SystemDfReport struct { + ImagesSize int64 Images []*SystemDfImageReport Containers []*SystemDfContainerReport Volumes []*SystemDfVolumeReport @@ -84,12 +87,6 @@ type SystemDfVolumeReport struct { ReclaimableSize int64 } -// SystemResetOptions describes the options for resetting your -// container runtime storage, etc -type SystemResetOptions struct { - Force bool -} - // SystemVersionReport describes version information about the running Podman service type SystemVersionReport struct { // Always populated diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go index 5ae8a49315f..44df664980e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go @@ -21,7 +21,7 @@ type Volume struct { } type Report struct { - Id []string // nolint + Id []string //nolint:revive,stylecheck Err map[string]error } @@ -78,10 +78,9 @@ type InspectOptions struct { // DiffOptions all API and CLI diff commands and diff sub-commands use the same options type DiffOptions struct { - Format string `json:",omitempty"` // CLI only - Latest bool `json:",omitempty"` // API and CLI, only supported by containers - Archive bool `json:",omitempty"` // CLI only - Type define.DiffType // Type which should be compared + Format string `json:",omitempty"` // CLI only + Latest bool `json:",omitempty"` // API and CLI, only supported by containers + Type define.DiffType // Type which should be compared } // DiffReport provides changes for object diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go index 84f85b83f47..dad09e07bdf 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go @@ -19,6 +19,8 @@ type VolumeCreateOptions struct { Labels map[string]string `schema:"labels"` // Mapping of driver options and values.
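Secrets now carry labels end to end: SecretCreateOptions.Labels flows into the stored SecretSpec and the remote SecretCreateRequest. A minimal construction sketch, assuming the vendored entities package is importable; the label values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/domain/entities"
)

func main() {
	opts := entities.SecretCreateOptions{
		Driver: "file", // the default driver
		// Labels are new in this version and round-trip through SecretSpec.
		Labels: map[string]string{"env": "staging", "owner": "team-a"},
	}
	fmt.Printf("%+v\n", opts)
}
```
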
Options map[string]string `schema:"opts"` + // Ignore existing volumes + IgnoreIfExists bool `schema:"ignoreIfExist"` } type VolumeConfigResponse struct { @@ -33,7 +35,7 @@ type VolumeRmOptions struct { type VolumeRmReport struct { Err error - Id string // nolint + Id string //nolint:revive,stylecheck } type VolumeInspectReport struct { @@ -54,6 +56,11 @@ type VolumeListReport struct { VolumeConfigResponse } +// VolumeReloadReport describes the response from reloading volume plugins +type VolumeReloadReport struct { + define.VolumeReload +} + /* * Docker API compatibility types */ @@ -61,7 +68,7 @@ type VolumeListReport struct { // VolumeMountReport describes the response from volume mount type VolumeMountReport struct { Err error - Id string // nolint + Id string //nolint:revive,stylecheck Name string Path string } @@ -69,5 +76,5 @@ type VolumeMountReport struct { // VolumeUnmountReport describes the response from unmounting a volume type VolumeUnmountReport struct { Err error - Id string // nolint + Id string //nolint:revive,stylecheck } diff --git a/vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go b/vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go index 6ee1e7e86c4..9b456c9c08b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go +++ b/vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go @@ -1,11 +1,11 @@ package errorhandling import ( + "errors" "os" "strings" "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -86,7 +86,7 @@ func Contains(err error, sub error) bool { // PodConflictErrorModel is used in remote connections with podman type PodConflictErrorModel struct { Errs []string - Id string // nolint + Id string //nolint:revive,stylecheck } // ErrorModel is used in remote connections with podman @@ -121,3 +121,22 @@ func (e PodConflictErrorModel) Error() string { func (e PodConflictErrorModel) Code() int { return 409 } + +// Cause returns the most underlying error for the provided one. There is a +// maximum error depth of 100 to avoid endless loops. An additional error log +// message will be created if this maximum has been reached. +func Cause(err error) (cause error) { + cause = err + + const maxDepth = 100 + for i := 0; i <= maxDepth; i++ { + res := errors.Unwrap(cause) + if res == nil { + return cause + } + cause = res + } + + logrus.Errorf("Max error depth of %d reached, cannot unwrap until root cause: %v", maxDepth, err) + return cause +} diff --git a/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go b/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go index 767d86daf59..15943858f95 100644 --- a/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go +++ b/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go @@ -41,18 +41,3 @@ type RootFS struct { Type string `json:"Type"` Layers []digest.Digest `json:"Layers"` } - -// ImageResult is used for podman images for collection and output.
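The new errorhandling.Cause walks %w-wrapped chains with the standard library's errors.Unwrap instead of relying on github.com/pkg/errors, stopping at the 100-level depth guard. A quick usage sketch:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containers/podman/v4/pkg/errorhandling"
)

func main() {
	root := errors.New("no such container")
	// Two levels of %w wrapping, as call sites typically produce.
	wrapped := fmt.Errorf("starting pod: %w", fmt.Errorf("starting infra: %w", root))

	// Cause unwraps until it reaches the innermost error.
	fmt.Println(errorhandling.Cause(wrapped)) // no such container
}
```
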
-type ImageResult struct { - Tag string - Repository string - RepoDigests []string - RepoTags []string - ID string - Digest digest.Digest - ConfigDigest digest.Digest - Created time.Time - Size *uint64 - Labels map[string]string - Dangling bool -} diff --git a/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go b/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go index c95f8e27547..4d4e496a6a9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go +++ b/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go @@ -19,8 +19,17 @@ const ( privateType = "private" shareableType = "shareable" slirpType = "slirp4netns" + pastaType = "pasta" ) +// KeepIDUserNsOptions defines how to create a user namespace in keep-id mode. +type KeepIDUserNsOptions struct { + // UID is the target uid in the user namespace. + UID *uint32 + // GID is the target gid in the user namespace. + GID *uint32 +} + // CgroupMode represents cgroup mode in the container. type CgroupMode string @@ -93,7 +102,8 @@ func (n UsernsMode) IsHost() bool { // IsKeepID indicates whether container uses a mapping where the (uid, gid) on the host is kept inside of the namespace. func (n UsernsMode) IsKeepID() bool { - return n == "keep-id" + parts := strings.Split(string(n), ":") + return parts[0] == "keep-id" } // IsNoMap indicates whether container uses a mapping where the (uid, gid) on the host is not present in the namespace. @@ -112,7 +122,7 @@ func (n UsernsMode) IsDefaultValue() bool { return n == "" || n == defaultType } -// GetAutoOptions returns a AutoUserNsOptions with the settings to setup automatically +// GetAutoOptions returns an AutoUserNsOptions with the settings to automatically set up // a user namespace. func (n UsernsMode) GetAutoOptions() (*types.AutoUserNsOptions, error) { parts := strings.SplitN(string(n), ":", 2) @@ -154,6 +164,44 @@ return &options, nil } +// GetKeepIDOptions returns a KeepIDUserNsOptions with the settings to set up +// a keep-id user namespace. +func (n UsernsMode) GetKeepIDOptions() (*KeepIDUserNsOptions, error) { + parts := strings.SplitN(string(n), ":", 2) + if parts[0] != "keep-id" { + return nil, fmt.Errorf("wrong user namespace mode") + } + options := KeepIDUserNsOptions{} + if len(parts) == 1 { + return &options, nil + } + for _, o := range strings.Split(parts[1], ",") { + v := strings.SplitN(o, "=", 2) + if len(v) != 2 { + return nil, fmt.Errorf("invalid option specified: %q", o) + } + switch v[0] { + case "uid": + s, err := strconv.ParseUint(v[1], 10, 32) + if err != nil { + return nil, err + } + v := uint32(s) + options.UID = &v + case "gid": + s, err := strconv.ParseUint(v[1], 10, 32) + if err != nil { + return nil, err + } + v := uint32(s) + options.GID = &v + default: + return nil, fmt.Errorf("unknown option specified: %q", v[0]) + } + } + return &options, nil +} + // IsPrivate indicates whether the container uses a private userns.
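With these changes the userns mode string may carry options, e.g. --userns=keep-id:uid=1000,gid=1000; IsKeepID now only checks the prefix, and GetKeepIDOptions parses the rest into the new options struct. A usage sketch:

```go
package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/namespaces"
)

func main() {
	mode := namespaces.UsernsMode("keep-id:uid=1000,gid=1000")

	// The prefix check now accepts an options suffix.
	fmt.Println(mode.IsKeepID()) // true

	opts, err := mode.GetKeepIDOptions()
	if err != nil {
		panic(err)
	}
	// Pointers distinguish "unset" from an explicit uid/gid of 0.
	fmt.Printf("uid=%d gid=%d\n", *opts.UID, *opts.GID) // uid=1000 gid=1000
}
```
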
func (n UsernsMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) @@ -393,6 +441,11 @@ func (n NetworkMode) IsSlirp4netns() bool { return n == slirpType || strings.HasPrefix(string(n), slirpType+":") } +// IsPasta indicates if we are running a rootless network stack using pasta +func (n NetworkMode) IsPasta() bool { + return n == pastaType || strings.HasPrefix(string(n), pastaType+":") +} + // IsNS indicates a network namespace passed in by path (ns:) func (n NetworkMode) IsNS() bool { return strings.HasPrefix(string(n), nsType) @@ -414,5 +467,5 @@ func (n NetworkMode) IsPod() bool { // IsUserDefined indicates user-created network func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() && !n.IsSlirp4netns() && !n.IsNS() + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() && !n.IsSlirp4netns() && !n.IsPasta() && !n.IsNS() } diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go index d7143f54919..6b9b30f35a6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go @@ -29,7 +29,7 @@ func TryJoinPauseProcess(pausePidPath string) (bool, int, error) { } // It could not join the pause process, let's lock the file before trying to delete it. - pidFileLock, err := lockfile.GetLockfile(pausePidPath) + pidFileLock, err := lockfile.GetLockFile(pausePidPath) if err != nil { // The file was deleted by another process. if os.IsNotExist(err) { @@ -40,9 +40,7 @@ pidFileLock.Lock() defer func() { - if pidFileLock.Locked() { - pidFileLock.Unlock() - } + pidFileLock.Unlock() }() // Now the pause PID file is locked. Try to join once again in case it changed while it was not locked. @@ -50,7 +48,7 @@ if err != nil { // It is still failing. We can safely remove it. os.Remove(pausePidPath) - return false, -1, nil // nolint: nilerr + return false, -1, nil //nolint: nilerr } return became, ret, err } @@ -137,7 +135,7 @@ func GetAvailableGids() (int64, error) { return countAvailableIDs(gids), nil } -// findIDInMappings find the the mapping that contains the specified ID. +// findIDInMappings finds the mapping that contains the specified ID. // It assumes availableMappings is sorted by ID. func findIDInMappings(id int64, availableMappings []user.IDMap) *user.IDMap { i := sort.Search(len(availableMappings), func(i int) bool { diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.c b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.c new file mode 100644 index 00000000000..f8f0ba4b8e9 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.c @@ -0,0 +1,63 @@ +#include <sys/types.h> +#include <sys/select.h> +#include <dirent.h> +#include <stdlib.h> +#include <unistd.h> + +static int open_files_max_fd; +static fd_set *open_files_set; + +int +is_fd_inherited(int fd) +{ + if (open_files_set == NULL || fd > open_files_max_fd || fd < 0) + return 0; + + return FD_ISSET(fd % FD_SETSIZE, &(open_files_set[fd / FD_SETSIZE])) ? 1 : 0; +} + +static void __attribute__((constructor)) init() +{ + /* Store how many FDs were open before the Go runtime kicked in.
*/ + DIR* d = opendir ("/dev/fd"); + if (d) + { + struct dirent *ent; + size_t size = 0; + + for (ent = readdir (d); ent; ent = readdir (d)) + { + int fd; + + if (ent->d_name[0] == '.') + continue; + + fd = atoi (ent->d_name); + if (fd == dirfd (d)) { + continue; + } + + if (fd >= size * FD_SETSIZE) + { + int i; + size_t new_size; + + new_size = (fd / FD_SETSIZE) + 1; + open_files_set = realloc (open_files_set, new_size * sizeof (fd_set)); + if (open_files_set == NULL) + _exit (EXIT_FAILURE); + + for (i = size; i < new_size; i++) + FD_ZERO (&(open_files_set[i])); + + size = new_size; + } + + if (fd > open_files_max_fd) { + open_files_max_fd = fd; + } + + FD_SET (fd % FD_SETSIZE, &(open_files_set[fd / FD_SETSIZE])); + } + } +} diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.go b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.go new file mode 100644 index 00000000000..525f789db49 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.go @@ -0,0 +1,69 @@ +//go:build freebsd && cgo +// +build freebsd,cgo + +package rootless + +import ( + "errors" + + "github.com/containers/storage/pkg/idtools" +) + +// extern int is_fd_inherited(int fd); +import "C" + +// IsRootless returns whether the user is rootless +func IsRootless() bool { + return false +} + +// BecomeRootInUserNS re-exec podman in a new userNS. It returns whether podman was re-executed +// into a new user namespace and the return code from the re-executed podman process. +// If podman was re-executed the caller needs to propagate the error code returned by the child +// process. It is a convenience function for BecomeRootInUserNSWithOpts with a default configuration. +func BecomeRootInUserNS(pausePid string) (bool, int, error) { + return false, -1, errors.New("Rootless mode is not supported on FreeBSD - run podman as root") +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + return -1 +} + +// GetRootlessGID returns the GID of the user in the parent userNS +func GetRootlessGID() int { + return -1 +} + +// TryJoinFromFilePaths attempts to join the namespaces of the pid files in paths. +// This is useful when there are already running containers and we +// don't have a pause process yet. We can use the paths to the conmon +// processes to attempt joining their namespaces. +// If needNewNamespace is set, the file is read from a temporary user +// namespace, this is useful for containers that are running with a +// different uidmap and the unprivileged user has no way to read the +// file owned by the root in the container. +func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []string) (bool, int, error) { + return false, -1, errors.New("this function is not supported on this os") +} + +// ConfigurationMatches checks whether the additional uids/gids configured for the user +// match the current user namespace. +func ConfigurationMatches() (bool, error) { + return true, nil +} + +// GetConfiguredMappings returns the additional IDs configured for the current user. 
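The FreeBSD stubs above keep the rootless API compiling everywhere while reporting that rootless mode is unavailable (BecomeRootInUserNS returns an error, IsRootless is always false). Cross-platform callers can keep the usual gate; a sketch, assuming the vendored rootless package builds on the target platform:

```go
package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/rootless"
)

func main() {
	// On FreeBSD this branch is never taken; on Linux it depends on the
	// effective UID and the user namespace Podman was started in.
	if rootless.IsRootless() {
		fmt.Println("running rootless as uid", rootless.GetRootlessUID())
		return
	}
	fmt.Println("running with full privileges")
}
```
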
+func GetConfiguredMappings(quiet bool) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, errors.New("this function is not supported on this os") +} + +// ReadMappingsProc returns the uid_map and gid_map +func ReadMappingsProc(path string) ([]idtools.IDMap, error) { + return nil, nil +} + +// IsFdInherited checks whether the fd is opened and valid to use +func IsFdInherited(fd int) bool { + return int(C.is_fd_inherited(C.int(fd))) > 0 +} diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c index 94bd40f8627..7e8b3f78a68 100644 --- a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -19,6 +20,9 @@ #include #include +#define ETC_PREEXEC_HOOKS "/etc/containers/pre-exec-hooks" +#define LIBEXECPODMAN "/usr/libexec/podman" + #ifndef TEMP_FAILURE_RETRY #define TEMP_FAILURE_RETRY(expression) \ (__extension__ \ @@ -117,6 +121,155 @@ rootless_gid () return rootless_gid_init; } +/* exec the specified executable and exit if it fails. */ +static void +exec_binary (const char *path, char **argv, int argc) +{ + int r, status = 0; + pid_t pid; + + pid = fork (); + if (pid < 0) + { + fprintf (stderr, "fork: %m\n"); + exit (EXIT_FAILURE); + } + if (pid == 0) + { + size_t i; + char **newargv = malloc ((argc + 2) * sizeof(char *)); + if (!newargv) + { + fprintf (stderr, "malloc: %m\n"); + exit (EXIT_FAILURE); + } + newargv[0] = (char*) path; + for (i = 0; i < argc; i++) + newargv[i+1] = argv[i]; + + newargv[i+1] = NULL; + errno = 0; + execv (path, newargv); + /* If the file was deleted in the meanwhile, return success. */ + if (errno == ENOENT) + exit (EXIT_SUCCESS); + exit (EXIT_FAILURE); + } + + r = TEMP_FAILURE_RETRY (waitpid (pid, &status, 0)); + if (r < 0) + { + fprintf (stderr, "waitpid: %m\n"); + exit (EXIT_FAILURE); + } + if (WIFEXITED(status) && WEXITSTATUS (status)) + { + fprintf (stderr, "external preexec hook %s failed\n", path); + exit (WEXITSTATUS(status)); + } + if (WIFSIGNALED (status)) + { + fprintf (stderr, "external preexec hook %s failed\n", path); + exit (127+WTERMSIG (status)); + } + if (WIFSTOPPED (status)) + { + fprintf (stderr, "external preexec hook %s failed\n", path); + exit (EXIT_FAILURE); + } +} + +static void +do_preexec_hooks_dir (const char *dir, char **argv, int argc) +{ + cleanup_free char *buffer = NULL; + cleanup_dir DIR *d = NULL; + size_t i, nfiles = 0; + struct dirent *de; + + /* Store how many FDs were open before the Go runtime kicked in. 
*/ + d = opendir (dir); + if (!d) + { + if (errno != ENOENT) + { + fprintf (stderr, "opendir %s: %m\n", dir); + exit (EXIT_FAILURE); + } + return; + } + + errno = 0; + + for (de = readdir (d); de; de = readdir (d)) + { + buffer = realloc (buffer, (nfiles + 1) * (NAME_MAX + 1)); + if (buffer == NULL) + { + fprintf (stderr, "realloc buffer: %m\n"); + exit (EXIT_FAILURE); + } + + if (de->d_type != DT_REG) + continue; + + strncpy (buffer + nfiles * (NAME_MAX + 1), de->d_name, NAME_MAX + 1); + nfiles++; + buffer[nfiles * (NAME_MAX + 1)] = '\0'; + } + + qsort (buffer, nfiles, NAME_MAX + 1, (int (*)(const void *, const void *)) strcmp); + + for (i = 0; i < nfiles; i++) + { + const char *fname = buffer + i * (NAME_MAX + 1); + char path[PATH_MAX]; + struct stat st; + int ret; + + ret = snprintf (path, PATH_MAX, "%s/%s", dir, fname); + if (ret == PATH_MAX) + { + fprintf (stderr, "internal error: path too long\n"); + exit (EXIT_FAILURE); + } + + ret = stat (path, &st); + if (ret < 0) + { + /* Ignore the failure if the file was deleted. */ + if (errno == ENOENT) + continue; + + fprintf (stderr, "stat %s: %m\n", path); + exit (EXIT_FAILURE); + } + + /* Not an executable. */ + if ((st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) + continue; + + exec_binary (path, argv, argc); + errno = 0; + } + + if (errno) + { + fprintf (stderr, "readdir %s: %m\n", dir); + exit (EXIT_FAILURE); + } +} + +static void +do_preexec_hooks (char **argv, int argc) +{ + char *preexec_hooks = getenv ("PODMAN_PREEXEC_HOOKS_DIR"); + do_preexec_hooks_dir (LIBEXECPODMAN "/pre-exec-hooks", argv, argc); + do_preexec_hooks_dir (ETC_PREEXEC_HOOKS, argv, argc); + if (preexec_hooks && preexec_hooks[0]) + do_preexec_hooks_dir (preexec_hooks, argv, argc); +} + static void do_pause () @@ -134,7 +287,7 @@ do_pause () sigaction (sig[i], &act, NULL); /* Attempt to execv catatonit to keep the pause process alive. */ - execl ("/usr/libexec/podman/catatonit", "catatonit", "-P", NULL); + execl (LIBEXECPODMAN "/catatonit", "catatonit", "-P", NULL); execl ("/usr/bin/catatonit", "catatonit", "-P", NULL); /* and if the catatonit executable could not be found, fallback here... */ @@ -144,7 +297,7 @@ } static char ** -get_cmd_line_args () +get_cmd_line_args (int *argc_out) { cleanup_free char *buffer = NULL; cleanup_close int fd = -1; @@ -178,7 +331,7 @@ char *tmp = realloc (buffer, allocated); if (tmp == NULL) return NULL; - buffer = tmp; + buffer = tmp; } } @@ -204,13 +357,15 @@ /* Move ownership.
*/ buffer = NULL; + if (argc_out) + *argc_out = argc; + return argv; } static bool -can_use_shortcut () +can_use_shortcut (char **argv) { - cleanup_free char **argv = NULL; cleanup_free char *argv0 = NULL; bool ret = true; int argc; @@ -219,10 +374,6 @@ can_use_shortcut () return false; #endif - argv = get_cmd_line_args (); - if (argv == NULL) - return false; - argv0 = argv[0]; if (strstr (argv[0], "podman") == NULL) @@ -235,6 +386,7 @@ can_use_shortcut () if (strcmp (argv[argc], "mount") == 0 || strcmp (argv[argc], "machine") == 0 + || strcmp (argv[argc], "context") == 0 || strcmp (argv[argc], "search") == 0 || (strcmp (argv[argc], "system") == 0 && argv[argc+1] && strcmp (argv[argc+1], "service") != 0)) { @@ -243,7 +395,7 @@ can_use_shortcut () } if (argv[argc+1] != NULL && (strcmp (argv[argc], "container") == 0 || - strcmp (argv[argc], "image") == 0) && + strcmp (argv[argc], "image") == 0) && (strcmp (argv[argc+1], "mount") == 0 || strcmp (argv[argc+1], "scp") == 0)) { ret = false; @@ -286,7 +438,9 @@ static void __attribute__((constructor)) init() const char *listen_pid; const char *listen_fds; const char *listen_fdnames; + cleanup_free char **argv = NULL; cleanup_dir DIR *d = NULL; + int argc; pause = getenv ("_PODMAN_PAUSE"); if (pause && pause[0]) @@ -336,30 +490,40 @@ static void __attribute__((constructor)) init() } } - listen_pid = getenv("LISTEN_PID"); - listen_fds = getenv("LISTEN_FDS"); - listen_fdnames = getenv("LISTEN_FDNAMES"); - - if (listen_pid != NULL && listen_fds != NULL && strtol(listen_pid, NULL, 10) == getpid()) - { - // save systemd socket environment for rootless child - do_socket_activation = true; - saved_systemd_listen_pid = strdup(listen_pid); - saved_systemd_listen_fds = strdup(listen_fds); - if (listen_fdnames != NULL) - saved_systemd_listen_fdnames = strdup(listen_fdnames); - if (saved_systemd_listen_pid == NULL - || saved_systemd_listen_fds == NULL) - { - fprintf (stderr, "save socket listen environments error: %m\n"); - _exit (EXIT_FAILURE); - } - } + argv = get_cmd_line_args (&argc); + if (argv == NULL) + { + fprintf(stderr, "cannot retrieve cmd line"); + _exit (EXIT_FAILURE); + } + + if (geteuid () != 0 || getenv ("_CONTAINERS_USERNS_CONFIGURED") == NULL) + do_preexec_hooks(argv, argc); + + listen_pid = getenv("LISTEN_PID"); + listen_fds = getenv("LISTEN_FDS"); + listen_fdnames = getenv("LISTEN_FDNAMES"); + + if (listen_pid != NULL && listen_fds != NULL && strtol(listen_pid, NULL, 10) == getpid()) + { + // save systemd socket environment for rootless child + do_socket_activation = true; + saved_systemd_listen_pid = strdup(listen_pid); + saved_systemd_listen_fds = strdup(listen_fds); + if (listen_fdnames != NULL) + saved_systemd_listen_fdnames = strdup(listen_fdnames); + if (saved_systemd_listen_pid == NULL + || saved_systemd_listen_fds == NULL) + { + fprintf (stderr, "save socket listen environments error: %m\n"); + _exit (EXIT_FAILURE); + } + } /* Shortcut. If we are able to join the pause pid file, do it now so we don't need to re-exec. */ xdg_runtime_dir = getenv ("XDG_RUNTIME_DIR"); - if (geteuid () != 0 && xdg_runtime_dir && xdg_runtime_dir[0] && can_use_shortcut ()) + if (geteuid () != 0 && xdg_runtime_dir && xdg_runtime_dir[0] && can_use_shortcut (argv)) { cleanup_free char *cwd = NULL; cleanup_close int userns_fd = -1; @@ -505,14 +669,16 @@ create_pause_process (const char *pause_pid_file_path, char **argv) if (pid) { char b; - int r; + int r, r2; close (p[1]); /* Block until we write the pid file. 
*/ r = TEMP_FAILURE_RETRY (read (p[0], &b, 1)); close (p[0]); - reexec_in_user_namespace_wait (pid, 0); + r2 = reexec_in_user_namespace_wait (pid, 0); + if (r2 != 0) + return -1; return r == 1 && b == '0' ? 0 : -1; } @@ -645,7 +811,7 @@ reexec_userns_join (int pid_to_join, char *pause_pid_file_path) sprintf (uid, "%d", geteuid ()); sprintf (gid, "%d", getegid ()); - argv = get_cmd_line_args (); + argv = get_cmd_line_args (NULL); if (argv == NULL) { fprintf (stderr, "cannot read argv: %m\n"); @@ -757,6 +923,7 @@ reexec_userns_join (int pid_to_join, char *pause_pid_file_path) } execvp (argv[0], argv); + fprintf (stderr, "failed to execvp %s: %m\n", argv[0]); _exit (EXIT_FAILURE); } @@ -788,7 +955,10 @@ copy_file_to_fd (const char *file_to_read, int outfd) fd = open (file_to_read, O_RDONLY); if (fd < 0) - return fd; + { + fprintf (stderr, "open `%s`: %m\n", file_to_read); + return fd; + } for (;;) { @@ -796,7 +966,10 @@ copy_file_to_fd (const char *file_to_read, int outfd) r = TEMP_FAILURE_RETRY (read (fd, buf, sizeof buf)); if (r < 0) - return r; + { + fprintf (stderr, "read from `%s`: %m\n", file_to_read); + return r; + } if (r == 0) break; @@ -805,7 +978,10 @@ copy_file_to_fd (const char *file_to_read, int outfd) { w = TEMP_FAILURE_RETRY (write (outfd, &buf[t], r - t)); if (w < 0) - return w; + { + fprintf (stderr, "write file to output fd `%s`: %m\n", file_to_read); + return w; + } t += w; } } @@ -885,7 +1061,7 @@ reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_re _exit (EXIT_FAILURE); } - argv = get_cmd_line_args (); + argv = get_cmd_line_args (NULL); if (argv == NULL) { fprintf (stderr, "cannot read argv: %m\n"); diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go index 5af9a978b0e..1937a1330d9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go @@ -6,9 +6,9 @@ package rootless import ( "bufio" "bytes" + "errors" "fmt" "io" - "io/ioutil" "os" "os/exec" gosignal "os/signal" @@ -23,7 +23,6 @@ import ( "github.com/containers/storage/pkg/idtools" pmount "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/unshare" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/syndtr/gocapability/capability" "golang.org/x/sys/unix" @@ -126,7 +125,7 @@ func tryMappingTool(uid bool, pid int, hostID int, mappings []idtools.IDMap) err } path, err := exec.LookPath(tool) if err != nil { - return errors.Wrapf(err, "command required for rootless mode with multiple IDs") + return fmt.Errorf("command required for rootless mode with multiple IDs: %w", err) } appendTriplet := func(l []string, a, b, c int) []string { @@ -143,7 +142,7 @@ func tryMappingTool(uid bool, pid int, hostID int, mappings []idtools.IDMap) err what = "GID" where = "/etc/subgid" } - return errors.Errorf("invalid configuration: the specified mapping %d:%d in %q includes the user %s", i.HostID, i.Size, where, what) + return fmt.Errorf("invalid configuration: the specified mapping %d:%d in %q includes the user %s", i.HostID, i.Size, where, what) } args = appendTriplet(args, i.ContainerID+1, i.HostID, i.Size) } @@ -154,13 +153,13 @@ func tryMappingTool(uid bool, pid int, hostID int, mappings []idtools.IDMap) err if output, err := cmd.CombinedOutput(); err != nil { logrus.Errorf("running `%s`: %s", strings.Join(args, " "), output) - errorStr := fmt.Sprintf("cannot setup 
namespace using %q", path) + errorStr := fmt.Sprintf("cannot set up namespace using %q", path) if isSet, err := unshare.IsSetID(cmd.Path, mode, cap); err != nil { logrus.Errorf("Failed to check for %s on %s: %v", idtype, path, err) } else if !isSet { errorStr = fmt.Sprintf("%s: should have %s or have filecaps %s", errorStr, idtype, idtype) } - return errors.Wrapf(err, errorStr) + return fmt.Errorf("%v: %w", errorStr, err) } return nil } @@ -173,7 +172,7 @@ func joinUserAndMountNS(pid uint, pausePid string) (bool, int, error) { if err != nil { return false, 0, err } - if hasCapSysAdmin || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" { + if (os.Geteuid() == 0 && hasCapSysAdmin) || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" { return false, 0, nil } @@ -182,7 +181,7 @@ func joinUserAndMountNS(pid uint, pausePid string) (bool, int, error) { pidC := C.reexec_userns_join(C.int(pid), cPausePid) if int(pidC) < 0 { - return false, -1, errors.Errorf("cannot re-exec process") + return false, -1, fmt.Errorf("cannot re-exec process to join the existing user namespace") } ret := C.reexec_in_user_namespace_wait(pidC, 0) @@ -194,7 +193,7 @@ func joinUserAndMountNS(pid uint, pausePid string) (bool, int, error) { } // GetConfiguredMappings returns the additional IDs configured for the current user. -func GetConfiguredMappings() ([]idtools.IDMap, []idtools.IDMap, error) { +func GetConfiguredMappings(quiet bool) ([]idtools.IDMap, []idtools.IDMap, error) { var uids, gids []idtools.IDMap username := os.Getenv("USER") if username == "" { @@ -212,7 +211,7 @@ func GetConfiguredMappings() ([]idtools.IDMap, []idtools.IDMap, error) { mappings, err := idtools.NewIDMappings(username, username) if err != nil { logLevel := logrus.ErrorLevel - if os.Geteuid() == 0 && GetRootlessUID() == 0 { + if quiet || (os.Geteuid() == 0 && GetRootlessUID() == 0) { logLevel = logrus.DebugLevel } logrus.StandardLogger().Logf(logLevel, "cannot find UID/GID for user %s: %v - check rootless mode in man pages.", username, err) @@ -224,7 +223,12 @@ func GetConfiguredMappings() ([]idtools.IDMap, []idtools.IDMap, error) { } func copyMappings(from, to string) error { - content, err := ioutil.ReadFile(from) + // when running as non-root always go through the newuidmap/newgidmap + // configuration since this is the expectation when running on Kubernetes + if os.Geteuid() != 0 { + return errors.New("copying mappings is allowed only for root") + } + content, err := os.ReadFile(from) if err != nil { return err } @@ -235,7 +239,7 @@ func copyMappings(from, to string) error { if bytes.Contains(content, []byte("4294967295")) { content = []byte("0 0 1\n1 1 4294967294\n") } - return ioutil.WriteFile(to, content, 0600) + return os.WriteFile(to, content, 0600) } func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ bool, _ int, retErr error) { @@ -244,27 +248,29 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo return false, 0, err } - if hasCapSysAdmin || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" { + if (os.Geteuid() == 0 && hasCapSysAdmin) || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" { if os.Getenv("_CONTAINERS_USERNS_CONFIGURED") == "init" { return false, 0, runInUser() } return false, 0, nil } - if mounts, err := pmount.GetMounts(); err == nil { - for _, m := range mounts { - if m.Mountpoint == "/" { - isShared := false - for _, o := range strings.Split(m.Optional, ",") { - if strings.HasPrefix(o, "shared:") { - isShared = true - break + if _, inContainer := 
os.LookupEnv("container"); !inContainer { + if mounts, err := pmount.GetMounts(); err == nil { + for _, m := range mounts { + if m.Mountpoint == "/" { + isShared := false + for _, o := range strings.Split(m.Optional, ",") { + if strings.HasPrefix(o, "shared:") { + isShared = true + break + } } + if !isShared { + logrus.Warningf("%q is not a shared mount, this could cause issues or missing mounts with rootless containers", m.Mountpoint) + } + break } - if !isShared { - logrus.Warningf("%q is not a shared mount, this could cause issues or missing mounts with rootless containers", m.Mountpoint) - } - break } } } @@ -303,7 +309,7 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo if retErr != nil && pid > 0 { if err := unix.Kill(pid, unix.SIGKILL); err != nil { if err != unix.ESRCH { - logrus.Errorf("Failed to cleanup process %d: %v", pid, err) + logrus.Errorf("Failed to clean up process %d: %v", pid, err) } } C.reexec_in_user_namespace_wait(C.int(pid), 0) @@ -313,10 +319,10 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo pidC := C.reexec_in_user_namespace(C.int(r.Fd()), cPausePid, cFileToRead, fileOutputFD) pid = int(pidC) if pid < 0 { - return false, -1, errors.Errorf("cannot re-exec process") + return false, -1, fmt.Errorf("cannot re-exec process") } - uids, gids, err := GetConfiguredMappings() + uids, gids, err := GetConfiguredMappings(false) if err != nil { return false, -1, err } @@ -341,15 +347,15 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo if !uidsMapped { logrus.Warnf("Using rootless single mapping into the namespace. This might break some images. Check /etc/subuid and /etc/subgid for adding sub*ids if not using a network user") setgroups := fmt.Sprintf("/proc/%d/setgroups", pid) - err = ioutil.WriteFile(setgroups, []byte("deny\n"), 0666) + err = os.WriteFile(setgroups, []byte("deny\n"), 0666) if err != nil { - return false, -1, errors.Wrapf(err, "cannot write setgroups file") + return false, -1, fmt.Errorf("cannot write setgroups file: %w", err) } logrus.Debugf("write setgroups file exited with 0") - err = ioutil.WriteFile(uidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Geteuid())), 0666) + err = os.WriteFile(uidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Geteuid())), 0666) if err != nil { - return false, -1, errors.Wrapf(err, "cannot write uid_map") + return false, -1, fmt.Errorf("cannot write uid_map: %w", err) } logrus.Debugf("write uid_map exited with 0") } @@ -367,21 +373,21 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo gidsMapped = err == nil } if !gidsMapped { - err = ioutil.WriteFile(gidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Getegid())), 0666) + err = os.WriteFile(gidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Getegid())), 0666) if err != nil { - return false, -1, errors.Wrapf(err, "cannot write gid_map") + return false, -1, fmt.Errorf("cannot write gid_map: %w", err) } } _, err = w.Write([]byte("0")) if err != nil { - return false, -1, errors.Wrapf(err, "write to sync pipe") + return false, -1, fmt.Errorf("write to sync pipe: %w", err) } b := make([]byte, 1) _, err = w.Read(b) if err != nil { - return false, -1, errors.Wrapf(err, "read from sync pipe") + return false, -1, fmt.Errorf("read from sync pipe: %w", err) } if fileOutput != nil { @@ -397,14 +403,15 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo // We have lost the race for writing the PID file, as probably another // process created a 
namespace and wrote the PID. // Try to join it. - data, err := ioutil.ReadFile(pausePid) + data, err := os.ReadFile(pausePid) if err == nil { - pid, err := strconv.ParseUint(string(data), 10, 0) + var pid uint64 + pid, err = strconv.ParseUint(string(data), 10, 0) if err == nil { return joinUserAndMountNS(uint(pid), "") } } - return false, -1, errors.New("setting up the process") + return false, -1, fmt.Errorf("setting up the process: %w", err) } if b[0] != '0' { @@ -461,17 +468,12 @@ func BecomeRootInUserNS(pausePid string) (bool, int, error) { // different uidmap and the unprivileged user has no way to read the // file owned by the root in the container. func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []string) (bool, int, error) { - if len(paths) == 0 { - return BecomeRootInUserNS(pausePidPath) - } - var lastErr error var pausePid int - foundProcess := false for _, path := range paths { if !needNewNamespace { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { lastErr = err continue @@ -479,12 +481,9 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st pausePid, err = strconv.Atoi(string(data)) if err != nil { - lastErr = errors.Wrapf(err, "cannot parse file %s", path) + lastErr = fmt.Errorf("cannot parse file %q: %w", path, err) continue } - - lastErr = nil - break } else { r, w, err := os.Pipe() if err != nil { @@ -511,26 +510,29 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st n, err := r.Read(b) if err != nil { - lastErr = errors.Wrapf(err, "cannot read %s\n", path) + lastErr = fmt.Errorf("cannot read %q: %w", path, err) continue } pausePid, err = strconv.Atoi(string(b[:n])) - if err == nil && unix.Kill(pausePid, 0) == nil { - foundProcess = true - lastErr = nil - break + if err != nil { + lastErr = err + continue } } - } - if !foundProcess && pausePidPath != "" { - return BecomeRootInUserNS(pausePidPath) + + if pausePid > 0 && unix.Kill(pausePid, 0) == nil { + joined, pid, err := joinUserAndMountNS(uint(pausePid), pausePidPath) + if err == nil { + return joined, pid, nil + } + lastErr = err + } } if lastErr != nil { return false, 0, lastErr } - - return joinUserAndMountNS(uint(pausePid), pausePidPath) + return false, 0, fmt.Errorf("could not find any running process: %w", unix.ESRCH) } // ReadMappingsProc parses and returns the ID mappings at the specified path. 
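The reworked TryJoinFromFilePaths above no longer falls back to BecomeRootInUserNS; instead it probes every candidate pause-PID file and joins the first namespace whose pause process is still alive. A minimal standalone sketch of that liveness probe, assuming a hypothetical helper name (pausePIDAlive is not part of the patch):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"

	"golang.org/x/sys/unix"
)

// pausePIDAlive reads a pause-PID file and checks, via the zero signal,
// whether the recorded process still exists. Signal 0 performs only the
// existence and permission checks; no signal is actually delivered.
func pausePIDAlive(path string) (int, bool, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return 0, false, err
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return 0, false, fmt.Errorf("cannot parse file %q: %w", path, err)
	}
	return pid, pid > 0 && unix.Kill(pid, 0) == nil, nil
}

func main() {
	pid, alive, err := pausePIDAlive("/tmp/podman-pause.pid") // hypothetical path
	fmt.Println(pid, alive, err)
}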
@@ -550,7 +552,7 @@ func ReadMappingsProc(path string) ([]idtools.IDMap, error) { if err == io.EOF { return mappings, nil } - return nil, errors.Wrapf(err, "cannot read line from %s", path) + return nil, fmt.Errorf("cannot read line from %s: %w", path, err) } if line == nil { return mappings, nil @@ -558,7 +560,7 @@ func ReadMappingsProc(path string) ([]idtools.IDMap, error) { containerID, hostID, size := 0, 0, 0 if _, err := fmt.Sscanf(string(line), "%d %d %d", &containerID, &hostID, &size); err != nil { - return nil, errors.Wrapf(err, "cannot parse %s", string(line)) + return nil, fmt.Errorf("cannot parse %s: %w", string(line), err) } mappings = append(mappings, idtools.IDMap{ContainerID: containerID, HostID: hostID, Size: size}) } @@ -595,7 +597,7 @@ func ConfigurationMatches() (bool, error) { return true, nil } - uids, gids, err := GetConfiguredMappings() + uids, gids, err := GetConfiguredMappings(false) if err != nil { return false, err } diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go index fe164e2350c..c2e86fa1d82 100644 --- a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go @@ -1,13 +1,13 @@ -//go:build !linux || !cgo -// +build !linux !cgo +//go:build !(linux || freebsd) || !cgo +// +build !linux,!freebsd !cgo package rootless import ( + "errors" "os" "github.com/containers/storage/pkg/idtools" - "github.com/pkg/errors" ) // IsRootless returns whether the user is rootless @@ -57,7 +57,7 @@ func ConfigurationMatches() (bool, error) { } // GetConfiguredMappings returns the additional IDs configured for the current user. -func GetConfiguredMappings() ([]idtools.IDMap, []idtools.IDMap, error) { +func GetConfiguredMappings(quiet bool) ([]idtools.IDMap, []idtools.IDMap, error) { return nil, nil, errors.New("this function is not supported on this os") } diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go index 5ea67843a5d..a81d0461b53 100644 --- a/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go @@ -2,11 +2,17 @@ package signal import ( "fmt" + "os" + "os/signal" "strconv" "strings" "syscall" ) +// Make sure the signal buffer is sufficiently big. +// runc is using the same value. +const SignalBufferSize = 2048 + // ParseSignal translates a string to a valid syscall signal. // It returns an error if the signal map doesn't include the given signal. func ParseSignal(rawSignal string) (syscall.Signal, error) { @@ -17,7 +23,7 @@ func ParseSignal(rawSignal string) (syscall.Signal, error) { } return syscall.Signal(s), nil } - sig, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + sig, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] if !ok { return -1, fmt.Errorf("invalid signal: %s", rawSignal) } @@ -32,10 +38,36 @@ func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) { if err == nil { return s, nil } - for k, v := range signalMap { + for k, v := range SignalMap { if k == strings.ToUpper(basename) { return v, nil } } return -1, fmt.Errorf("invalid signal: %s", basename) } + +// CatchAll catches all signals and relays them to the specified channel. 
+func CatchAll(sigc chan os.Signal) { + handledSigs := make([]os.Signal, 0, len(SignalMap)) + for _, s := range SignalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} + +// ParseSysSignalToName translates syscall.Signal to its name in the operating system. +// For example, syscall.Signal(9) will return "KILL" on Linux system. +func ParseSysSignalToName(s syscall.Signal) (string, error) { + for k, v := range SignalMap { + if v == s { + return k, nil + } + } + return "", fmt.Errorf("unknown syscall signal: %s", s) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go index 21e09c9fef0..81e4ed758a3 100644 --- a/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go @@ -9,8 +9,6 @@ package signal // NOTE: this package has originally been copied from github.com/docker/docker. import ( - "os" - "os/signal" "syscall" "golang.org/x/sys/unix" @@ -23,8 +21,8 @@ const ( SIGWINCH = syscall.SIGWINCH // For cross-compilation with Windows ) -// signalMap is a map of Linux signals. -var signalMap = map[string]syscall.Signal{ +// SignalMap is a map of Linux signals. +var SignalMap = map[string]syscall.Signal{ "ABRT": unix.SIGABRT, "ALRM": unix.SIGALRM, "BUS": unix.SIGBUS, @@ -92,17 +90,10 @@ var signalMap = map[string]syscall.Signal{ "RTMAX": sigrtmax, } -// CatchAll catches all signals and relays them to the specified channel. -func CatchAll(sigc chan os.Signal) { - handledSigs := make([]os.Signal, 0, len(signalMap)) - for _, s := range signalMap { - handledSigs = append(handledSigs, s) - } - signal.Notify(sigc, handledSigs...) -} - -// StopCatch stops catching the signals and closes the specified channel. -func StopCatch(sigc chan os.Signal) { - signal.Stop(sigc) - close(sigc) +// IsSignalIgnoredBySigProxy determines whether sig-proxy should ignore syscall signal +func IsSignalIgnoredBySigProxy(s syscall.Signal) bool { + // Ignore SIGCHLD and SIGPIPE - these are most likely intended for the podman command itself. + // SIGURG was added because of golang 1.14 and its preemptive changes causing more signals to "show up". + // https://github.com/containers/podman/issues/5483 + return s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG } diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go index 52b07aaf463..c97eeb23daa 100644 --- a/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go @@ -10,8 +10,6 @@ package signal // NOTE: this package has originally been copied from github.com/docker/docker. import ( - "os" - "os/signal" "syscall" "golang.org/x/sys/unix" @@ -24,8 +22,8 @@ const ( SIGWINCH = syscall.SIGWINCH ) -// signalMap is a map of Linux signals. -var signalMap = map[string]syscall.Signal{ +// SignalMap is a map of Linux signals. +var SignalMap = map[string]syscall.Signal{ "ABRT": unix.SIGABRT, "ALRM": unix.SIGALRM, "BUS": unix.SIGBUS, @@ -93,17 +91,10 @@ var signalMap = map[string]syscall.Signal{ "RTMAX": sigrtmax, } -// CatchAll catches all signals and relays them to the specified channel. 
-func CatchAll(sigc chan os.Signal) { - handledSigs := make([]os.Signal, 0, len(signalMap)) - for _, s := range signalMap { - handledSigs = append(handledSigs, s) - } - signal.Notify(sigc, handledSigs...) -} - -// StopCatch stops catching the signals and closes the specified channel. -func StopCatch(sigc chan os.Signal) { - signal.Stop(sigc) - close(sigc) +// IsSignalIgnoredBySigProxy determines whether sig-proxy should ignore syscall signal +func IsSignalIgnoredBySigProxy(s syscall.Signal) bool { + // Ignore SIGCHLD and SIGPIPE - these are most likely intended for the podman command itself. + // SIGURG was added because of golang 1.14 and its preemptive changes causing more signals to "show up". + // https://github.com/containers/podman/issues/5483 + return s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG } diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go index c0aa62d2107..01d99d7bc15 100644 --- a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go @@ -5,7 +5,6 @@ package signal import ( - "os" "syscall" ) @@ -16,12 +15,12 @@ const ( SIGWINCH = syscall.SIGWINCH ) -// signalMap is a map of Linux signals. +// SignalMap is a map of Linux signals. // These constants are sourced from the Linux version of golang.org/x/sys/unix // (I don't see much risk of this changing). // This should work as long as Podman only runs containers on Linux, which seems // a safe assumption for now. -var signalMap = map[string]syscall.Signal{ +var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.Signal(0x6), "ALRM": syscall.Signal(0xe), "BUS": syscall.Signal(0x7), @@ -89,12 +88,10 @@ var signalMap = map[string]syscall.Signal{ "RTMAX": sigrtmax, } -// CatchAll catches all signals and relays them to the specified channel. -func CatchAll(sigc chan os.Signal) { - panic("Unsupported on non-linux platforms") -} - -// StopCatch stops catching the signals and closes the specified channel. -func StopCatch(sigc chan os.Signal) { - panic("Unsupported on non-linux platforms") +// IsSignalIgnoredBySigProxy determines whether sig-proxy should ignore syscall signal +func IsSignalIgnoredBySigProxy(s syscall.Signal) bool { + // Ignore SIGCHLD and SIGPIPE - these are most likely intended for the podman command itself. + // SIGURG was added because of golang 1.14 and its preemptive changes causing more signals to "show up". + // https://github.com/containers/podman/issues/5483 + return s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG } diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go index d8bba7c905c..590aaf978f7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go @@ -5,7 +5,6 @@ package signal import ( - "os" "syscall" ) @@ -16,12 +15,12 @@ const ( SIGWINCH = syscall.Signal(0xff) ) -// signalMap is a map of Linux signals. +// SignalMap is a map of Linux signals. // These constants are sourced from the Linux version of golang.org/x/sys/unix // (I don't see much risk of this changing). // This should work as long as Podman only runs containers on Linux, which seems // a safe assumption for now. 
-var signalMap = map[string]syscall.Signal{ +var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.Signal(0x6), "ALRM": syscall.Signal(0xe), "BUS": syscall.Signal(0x7), @@ -89,12 +88,8 @@ var signalMap = map[string]syscall.Signal{ "RTMAX": sigrtmax, } -// CatchAll catches all signals and relays them to the specified channel. -func CatchAll(sigc chan os.Signal) { - panic("Unsupported on non-linux platforms") -} - -// StopCatch stops catching the signals and closes the specified channel. -func StopCatch(sigc chan os.Signal) { - panic("Unsupported on non-linux platforms") +// IsSignalIgnoredBySigProxy determines whether sig-proxy should ignore a syscall signal to +// keep the container running or not. On an unsupported OS this should not ignore any syscall signal. +func IsSignalIgnoredBySigProxy(s syscall.Signal) bool { + return false } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/config_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/specgen/config_unsupported.go index a6bf77277ce..becfd2eafd5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/config_unsupported.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/config_unsupported.go @@ -4,9 +4,10 @@ package specgen import ( + "errors" + "github.com/containers/common/libimage" spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) func (s *SpecGenerator) getSeccompConfig(configSpec *spec.Spec, img *libimage.Image) (*spec.LinuxSeccomp, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go b/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go index 5616a4511de..064245602e8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go @@ -1,6 +1,8 @@ package specgen import ( + "errors" + "fmt" "strconv" "strings" @@ -8,7 +10,6 @@ import ( "github.com/containers/podman/v4/libpod/define" "github.com/containers/podman/v4/pkg/rootless" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) var ( @@ -23,7 +24,7 @@ var ( ) func exclusiveOptions(opt1, opt2 string) error { - return errors.Errorf("%s and %s are mutually exclusive options", opt1, opt2) + return fmt.Errorf("%s and %s are mutually exclusive options", opt1, opt2) } // Validate verifies that the given SpecGenerator is valid and satisfies required @@ -33,18 +34,18 @@ func (s *SpecGenerator) Validate() error { // associated with them because those should be on the infra container. 
if len(s.Pod) > 0 && s.NetNS.NSMode == FromPod { if len(s.Networks) > 0 { - return errors.Wrap(define.ErrNetworkOnPodContainer, "networks must be defined when the pod is created") + return fmt.Errorf("networks must be defined when the pod is created: %w", define.ErrNetworkOnPodContainer) } if len(s.PortMappings) > 0 || s.PublishExposedPorts { - return errors.Wrap(define.ErrNetworkOnPodContainer, "published or exposed ports must be defined when the pod is created") + return fmt.Errorf("published or exposed ports must be defined when the pod is created: %w", define.ErrNetworkOnPodContainer) } if len(s.HostAdd) > 0 { - return errors.Wrap(define.ErrNetworkOnPodContainer, "extra host entries must be specified on the pod") + return fmt.Errorf("extra host entries must be specified on the pod: %w", define.ErrNetworkOnPodContainer) } } if s.NetNS.IsContainer() && len(s.HostAdd) > 0 { - return errors.Wrap(ErrInvalidSpecConfig, "cannot set extra host entries when the container is joined to another containers network namespace") + return fmt.Errorf("cannot set extra host entries when the container is joined to another container's network namespace: %w", ErrInvalidSpecConfig) } // @@ -52,22 +53,23 @@ func (s *SpecGenerator) Validate() error { // // Rootfs and Image cannot both be populated if len(s.ContainerStorageConfig.Image) > 0 && len(s.ContainerStorageConfig.Rootfs) > 0 { - return errors.Wrap(ErrInvalidSpecConfig, "both image and rootfs cannot be simultaneously") + return fmt.Errorf("both image and rootfs cannot be set simultaneously: %w", ErrInvalidSpecConfig) } // Cannot set hostname and utsns if len(s.ContainerBasicConfig.Hostname) > 0 && !s.ContainerBasicConfig.UtsNS.IsPrivate() { if s.ContainerBasicConfig.UtsNS.IsPod() { - return errors.Wrap(ErrInvalidSpecConfig, "cannot set hostname when joining the pod UTS namespace") + return fmt.Errorf("cannot set hostname when joining the pod UTS namespace: %w", ErrInvalidSpecConfig) } - return errors.Wrap(ErrInvalidSpecConfig, "cannot set hostname when running in the host UTS namespace") + + return fmt.Errorf("cannot set hostname when running in the host UTS namespace: %w", ErrInvalidSpecConfig) } // systemd values must be true, false, or always if len(s.ContainerBasicConfig.Systemd) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerBasicConfig.Systemd), SystemDValues) { - return errors.Wrapf(ErrInvalidSpecConfig, "--systemd values must be one of %q", strings.Join(SystemDValues, ", ")) + return fmt.Errorf("--systemd values must be one of %q: %w", strings.Join(SystemDValues, ", "), ErrInvalidSpecConfig) } - // sdnotify values must be container, conmon, or ignore - if len(s.ContainerBasicConfig.SdNotifyMode) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerBasicConfig.SdNotifyMode), SdNotifyModeValues) { - return errors.Wrapf(ErrInvalidSpecConfig, "--sdnotify values must be one of %q", strings.Join(SdNotifyModeValues, ", ")) + + if err := define.ValidateSdNotifyMode(s.ContainerBasicConfig.SdNotifyMode); err != nil { + return err } // @@ -79,12 +81,12 @@ func (s *SpecGenerator) Validate() error { } // imagevolumemode must be one of ignore, tmpfs, or anonymous if given if len(s.ContainerStorageConfig.ImageVolumeMode) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerStorageConfig.ImageVolumeMode), ImageVolumeModeValues) { - return errors.Errorf("invalid ImageVolumeMode %q, value must be one of %s", + return fmt.Errorf("invalid ImageVolumeMode %q, value must be one of %s", s.ContainerStorageConfig.ImageVolumeMode, strings.Join(ImageVolumeModeValues, 
",")) } // shmsize conflicts with IPC namespace if s.ContainerStorageConfig.ShmSize != nil && (s.ContainerStorageConfig.IpcNS.IsHost() || s.ContainerStorageConfig.IpcNS.IsNone()) { - return errors.Errorf("cannot set shmsize when running in the %s IPC Namespace", s.ContainerStorageConfig.IpcNS) + return fmt.Errorf("cannot set shmsize when running in the %s IPC Namespace", s.ContainerStorageConfig.IpcNS) } // @@ -92,7 +94,7 @@ func (s *SpecGenerator) Validate() error { // // userns and idmappings conflict if s.UserNS.IsPrivate() && s.IDMappings == nil { - return errors.Wrap(ErrInvalidSpecConfig, "IDMappings are required when not creating a User namespace") + return fmt.Errorf("IDMappings are required when not creating a User namespace: %w", ErrInvalidSpecConfig) } // @@ -142,11 +144,11 @@ func (s *SpecGenerator) Validate() error { for _, limit := range tmpnproc { limitSplit := strings.SplitN(limit, "=", 2) if len(limitSplit) < 2 { - return errors.Wrapf(invalidUlimitFormatError, "missing = in %s", limit) + return fmt.Errorf("missing = in %s: %w", limit, invalidUlimitFormatError) } valueSplit := strings.SplitN(limitSplit[1], ":", 2) if len(valueSplit) < 2 { - return errors.Wrapf(invalidUlimitFormatError, "missing : in %s", limit) + return fmt.Errorf("missing : in %s: %w", limit, invalidUlimitFormatError) } hard, err := strconv.Atoi(valueSplit[0]) if err != nil { @@ -196,7 +198,7 @@ func (s *SpecGenerator) Validate() error { } if s.NetNS.NSMode != Bridge && len(s.Networks) > 0 { // Note that we also get the ip and mac in the networks map - return errors.New("Networks and static ip/mac address can only be used with Bridge mode networking") + return errors.New("networks and static ip/mac address can only be used with Bridge mode networking") } return nil diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go b/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go index f1343f6e206..94c96794a35 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go @@ -1,6 +1,7 @@ package specgen import ( + "errors" "fmt" "net" "os" @@ -10,11 +11,12 @@ import ( "github.com/containers/common/pkg/cgroups" cutil "github.com/containers/common/pkg/util" "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/namespaces" + "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/podman/v4/pkg/util" "github.com/containers/storage" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" - "github.com/pkg/errors" ) type NamespaceMode string @@ -23,8 +25,7 @@ const ( // Default indicates the spec generator should determine // a sane default Default NamespaceMode = "default" - // Host means the the namespace is derived from - // the host + // Host means the namespace is derived from the host Host NamespaceMode = "host" // Path is the path to a namespace Path NamespaceMode = "path" @@ -40,10 +41,10 @@ const ( // None indicates the IPC namespace is created without mounting /dev/shm None NamespaceMode = "none" // NoNetwork indicates no network namespace should - // be joined. loopback should still exists. + // be joined. loopback should still exist. // Only used with the network namespace, invalid otherwise. NoNetwork NamespaceMode = "none" - // Bridge indicates that a CNI network stack + // Bridge indicates that the network backend (CNI/netavark) // should be used. 
// Only used with the network namespace, invalid otherwise. Bridge NamespaceMode = "bridge" @@ -51,6 +52,9 @@ const ( // be used. // Only used with the network namespace, invalid otherwise. Slirp NamespaceMode = "slirp4netns" + // Pasta indicates that a pasta network stack should be used. + // Only used with the network namespace, invalid otherwise. + Pasta NamespaceMode = "pasta" // KeepId indicates a user namespace to keep the owner uid inside // of the namespace itself. // Only used with the user namespace, invalid otherwise. @@ -155,21 +159,26 @@ func validateNetNS(n *Namespace) error { switch n.NSMode { case Slirp: break + case Pasta: + if rootless.IsRootless() { + break + } + return fmt.Errorf("pasta networking is only supported for rootless mode") case "", Default, Host, Path, FromContainer, FromPod, Private, NoNetwork, Bridge: break default: - return errors.Errorf("invalid network %q", n.NSMode) + return fmt.Errorf("invalid network %q", n.NSMode) } // Path and From Container MUST have a string value set if n.NSMode == Path || n.NSMode == FromContainer { if len(n.Value) < 1 { - return errors.Errorf("namespace mode %s requires a value", n.NSMode) + return fmt.Errorf("namespace mode %s requires a value", n.NSMode) } } else if n.NSMode != Slirp { // All others except Slirp must NOT set a string value if len(n.Value) > 0 { - return errors.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode) + return fmt.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode) } } @@ -196,21 +205,21 @@ func (n *Namespace) validate() error { switch n.NSMode { case "", Default, Host, Path, FromContainer, FromPod, Private: // Valid, do nothing - case NoNetwork, Bridge, Slirp: - return errors.Errorf("cannot use network modes with non-network namespace") + case NoNetwork, Bridge, Slirp, Pasta: + return errors.New("cannot use network modes with non-network namespace") default: - return errors.Errorf("invalid namespace type %s specified", n.NSMode) + return fmt.Errorf("invalid namespace type %s specified", n.NSMode) } // Path and From Container MUST have a string value set if n.NSMode == Path || n.NSMode == FromContainer { if len(n.Value) < 1 { - return errors.Errorf("namespace mode %s requires a value", n.NSMode) + return fmt.Errorf("namespace mode %s requires a value", n.NSMode) } } else { // All others must NOT set a string value if len(n.Value) > 0 { - return errors.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode) + return fmt.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode) } } return nil @@ -231,19 +240,19 @@ func ParseNamespace(ns string) (Namespace, error) { case strings.HasPrefix(ns, "ns:"): split := strings.SplitN(ns, ":", 2) if len(split) != 2 { - return toReturn, errors.Errorf("must provide a path to a namespace when specifying \"ns:\"") + return toReturn, fmt.Errorf("must provide a path to a namespace when specifying \"ns:\"") } toReturn.NSMode = Path toReturn.Value = split[1] case strings.HasPrefix(ns, "container:"): split := strings.SplitN(ns, ":", 2) if len(split) != 2 { - return toReturn, errors.Errorf("must provide name or ID or a container when specifying \"container:\"") + return toReturn, fmt.Errorf("must provide name or ID of a container when specifying \"container:\"") } toReturn.NSMode = FromContainer toReturn.Value = split[1] default: - return toReturn, errors.Errorf("unrecognized namespace mode %s passed", ns) + return toReturn, 
fmt.Errorf("unrecognized namespace mode %s passed", ns) } return toReturn, nil @@ -266,7 +275,7 @@ func ParseCgroupNamespace(ns string) (Namespace, error) { case "private", "": toReturn.NSMode = Private default: - return toReturn, errors.Errorf("unrecognized cgroup namespace mode %s passed", ns) + return toReturn, fmt.Errorf("unrecognized cgroup namespace mode %s passed", ns) } } else { toReturn.NSMode = Host @@ -274,7 +283,7 @@ func ParseCgroupNamespace(ns string) (Namespace, error) { return toReturn, nil } -// ParseIPCNamespace parses a ipc namespace specification in string +// ParseIPCNamespace parses an ipc namespace specification in string // form. func ParseIPCNamespace(ns string) (Namespace, error) { toReturn := Namespace{} @@ -300,7 +309,7 @@ func ParseUserNamespace(ns string) (Namespace, error) { case strings.HasPrefix(ns, "auto:"): split := strings.SplitN(ns, ":", 2) if len(split) != 2 { - return toReturn, errors.Errorf("invalid setting for auto: mode") + return toReturn, errors.New("invalid setting for auto: mode") } toReturn.NSMode = Auto toReturn.Value = split[1] @@ -308,6 +317,14 @@ func ParseUserNamespace(ns string) (Namespace, error) { case ns == "keep-id": toReturn.NSMode = KeepID return toReturn, nil + case strings.HasPrefix(ns, "keep-id:"): + split := strings.SplitN(ns, ":", 2) + if len(split) != 2 { + return toReturn, errors.New("invalid setting for keep-id: mode") + } + toReturn.NSMode = KeepID + toReturn.Value = split[1] + return toReturn, nil case ns == "nomap": toReturn.NSMode = NoMap return toReturn, nil @@ -320,7 +337,8 @@ func ParseUserNamespace(ns string) (Namespace, error) { // ParseNetworkFlag parses a network string slice into the network options // If the input is nil or empty it will use the default setting from containers.conf -func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetworkOptions, map[string][]string, error) { +// TODO (5.0): Drop pastaNetworkNameExists +func ParseNetworkFlag(networks []string, pastaNetworkNameExists bool) (Namespace, map[string]types.PerNetworkOptions, map[string][]string, error) { var networkOptions map[string][]string // by default we try to use the containers.conf setting // if we get at least one value use this instead @@ -365,17 +383,33 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork case strings.HasPrefix(ns, "ns:"): split := strings.SplitN(ns, ":", 2) if len(split) != 2 { - return toReturn, nil, nil, errors.Errorf("must provide a path to a namespace when specifying \"ns:\"") + return toReturn, nil, nil, errors.New("must provide a path to a namespace when specifying \"ns:\"") } toReturn.NSMode = Path toReturn.Value = split[1] case strings.HasPrefix(ns, string(FromContainer)+":"): split := strings.SplitN(ns, ":", 2) if len(split) != 2 { - return toReturn, nil, nil, errors.Errorf("must provide name or ID or a container when specifying \"container:\"") + return toReturn, nil, nil, errors.New("must provide name or ID or a container when specifying \"container:\"") } toReturn.NSMode = FromContainer toReturn.Value = split[1] + case ns == string(Pasta), strings.HasPrefix(ns, string(Pasta)+":"): + var parts []string + + if pastaNetworkNameExists { + goto nextCase + } + + parts = strings.SplitN(ns, ":", 2) + if len(parts) > 1 { + networkOptions = make(map[string][]string) + networkOptions[parts[0]] = strings.Split(parts[1], ",") + } + toReturn.NSMode = Pasta + break + nextCase: + fallthrough default: // we should have a normal network parts := strings.SplitN(ns, ":", 2) 
@@ -391,7 +425,7 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork } netOpts, err := parseBridgeNetworkOptions(parts[1]) if err != nil { - return toReturn, nil, nil, errors.Wrapf(err, "invalid option for network %s", parts[0]) + return toReturn, nil, nil, fmt.Errorf("invalid option for network %s: %w", parts[0], err) } podmanNetworks[parts[0]] = netOpts } @@ -402,24 +436,25 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork if len(networks) > 1 { if !toReturn.IsBridge() { - return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "cannot set multiple networks without bridge network mode, selected mode %s", toReturn.NSMode) + return toReturn, nil, nil, fmt.Errorf("cannot set multiple networks without bridge network mode, selected mode %s: %w", toReturn.NSMode, define.ErrInvalidArg) } for _, network := range networks[1:] { parts := strings.SplitN(network, ":", 2) if parts[0] == "" { - return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "network name cannot be empty") + return toReturn, nil, nil, fmt.Errorf("network name cannot be empty: %w", define.ErrInvalidArg) } + // TODO (5.0): Don't accept string(Pasta) here once we drop pastaNetworkNameExists if cutil.StringInSlice(parts[0], []string{string(Bridge), string(Slirp), string(FromPod), string(NoNetwork), string(Default), string(Private), string(Path), string(FromContainer), string(Host)}) { - return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "can only set extra network names, selected mode %s conflicts with bridge", parts[0]) + return toReturn, nil, nil, fmt.Errorf("can only set extra network names, selected mode %s conflicts with bridge: %w", parts[0], define.ErrInvalidArg) } netOpts := types.PerNetworkOptions{} if len(parts) > 1 { var err error netOpts, err = parseBridgeNetworkOptions(parts[1]) if err != nil { - return toReturn, nil, nil, errors.Wrapf(err, "invalid option for network %s", parts[0]) + return toReturn, nil, nil, fmt.Errorf("invalid option for network %s: %w", parts[0], err) } } podmanNetworks[parts[0]] = netOpts @@ -441,7 +476,7 @@ func parseBridgeNetworkOptions(opts string) (types.PerNetworkOptions, error) { case "ip", "ip6": ip := net.ParseIP(split[1]) if ip == nil { - return netOpts, errors.Errorf("invalid ip address %q", split[1]) + return netOpts, fmt.Errorf("invalid ip address %q", split[1]) } netOpts.StaticIPs = append(netOpts.StaticIPs, ip) @@ -465,7 +500,7 @@ func parseBridgeNetworkOptions(opts string) (types.PerNetworkOptions, error) { netOpts.InterfaceName = split[1] default: - return netOpts, errors.Errorf("unknown bridge network option: %s", split[0]) + return netOpts, fmt.Errorf("unknown bridge network option: %s", split[0]) } } return netOpts, nil @@ -477,7 +512,7 @@ func SetupUserNS(idmappings *storage.IDMappingOptions, userns Namespace, g *gene switch userns.NSMode { case Path: if _, err := os.Stat(userns.Value); err != nil { - return user, errors.Wrap(err, "cannot find specified user namespace path") + return user, fmt.Errorf("cannot find specified user namespace path: %w", err) } if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), userns.Value); err != nil { return user, err @@ -490,13 +525,18 @@ func SetupUserNS(idmappings *storage.IDMappingOptions, userns Namespace, g *gene return user, err } case KeepID: - mappings, uid, gid, err := util.GetKeepIDMapping() + opts, err := namespaces.UsernsMode(userns.String()).GetKeepIDOptions() + if err != nil { + return user, err + } + mappings, uid, gid, err 
:= util.GetKeepIDMapping(opts) if err != nil { return user, err } idmappings = mappings g.SetProcessUID(uint32(uid)) g.SetProcessGID(uint32(gid)) + g.AddProcessAdditionalGid(uint32(gid)) user = fmt.Sprintf("%d:%d", uid, gid) if err := privateUserNamespace(idmappings, g); err != nil { return user, err @@ -509,6 +549,7 @@ func SetupUserNS(idmappings *storage.IDMappingOptions, userns Namespace, g *gene idmappings = mappings g.SetProcessUID(uint32(uid)) g.SetProcessGID(uint32(gid)) + g.AddProcessAdditionalGid(uint32(gid)) user = fmt.Sprintf("%d:%d", uid, gid) if err := privateUserNamespace(idmappings, g); err != nil { return user, err @@ -526,7 +567,7 @@ func privateUserNamespace(idmappings *storage.IDMappingOptions, g *generate.Gene return err } if idmappings == nil || (len(idmappings.UIDMap) == 0 && len(idmappings.GIDMap) == 0) { - return errors.Errorf("must provide at least one UID or GID mapping to configure a user namespace") + return errors.New("must provide at least one UID or GID mapping to configure a user namespace") } for _, uidmap := range idmappings.UIDMap { g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size)) diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go b/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go index 8d971a25e20..1c0413cd5c9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go @@ -1,8 +1,10 @@ package specgen import ( + "errors" + "fmt" + "github.com/containers/podman/v4/pkg/util" - "github.com/pkg/errors" ) var ( @@ -13,7 +15,7 @@ var ( ) func exclusivePodOptions(opt1, opt2 string) error { - return errors.Wrapf(ErrInvalidPodSpecConfig, "%s and %s are mutually exclusive pod options", opt1, opt2) + return fmt.Errorf("%s and %s are mutually exclusive pod options: %w", opt1, opt2, ErrInvalidPodSpecConfig) } // Validate verifies the input is valid @@ -63,9 +65,9 @@ func (p *PodSpecGenerator) Validate() error { return exclusivePodOptions("NoInfra", "NoManageResolvConf") } } - if p.NetNS.NSMode != "" && p.NetNS.NSMode != Bridge && p.NetNS.NSMode != Slirp && p.NetNS.NSMode != Default { + if p.NetNS.NSMode != "" && p.NetNS.NSMode != Bridge && p.NetNS.NSMode != Slirp && p.NetNS.NSMode != Pasta && p.NetNS.NSMode != Default { if len(p.PortMappings) > 0 { - return errors.New("PortMappings can only be used with Bridge or slirp4netns networking") + return errors.New("PortMappings can only be used with Bridge, slirp4netns, or pasta networking") } } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go b/vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go index ad9414f67b8..faeeb2ed6a5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go @@ -20,6 +20,8 @@ type PodBasicConfig struct { // all containers in the pod as long as the UTS namespace is shared. // Optional. Hostname string `json:"hostname,omitempty"` + // ExitPolicy determines the pod's exit and stop behaviour. + ExitPolicy string `json:"exit_policy,omitempty"` // Labels are key-value pairs that are used to add metadata to pods. // Optional. Labels map[string]string `json:"labels,omitempty"` @@ -75,6 +77,8 @@ type PodBasicConfig struct { // Any containers created within the pod will inherit the pod's userns settings. 
// Optional Userns Namespace `json:"userns,omitempty"` + // UtsNs is used to indicate the UTS mode the pod is in + UtsNs Namespace `json:"utsns,omitempty"` // Devices contains user specified Devices to be added to the Pod Devices []string `json:"pod_devices,omitempty"` // Sysctl sets kernel parameters for the pod @@ -94,7 +98,7 @@ type PodNetworkConfig struct { // PortMappings is a set of ports to map into the infra container. // As, by default, containers share their network with the infra // container, this will forward the ports to the entire pod. - // Only available if NetNS is set to Bridge or Slirp. + // Only available if NetNS is set to Bridge, Slirp, or Pasta. // Optional. PortMappings []types.PortMapping `json:"portmappings,omitempty"` // Map of networks names to ids the container should join to. @@ -181,6 +185,10 @@ type PodStorageConfig struct { // comma-separated options. Valid options are 'ro', 'rw', and 'z'. // Options will be used for all volumes sourced from the container. VolumesFrom []string `json:"volumes_from,omitempty"` + // ShmSize is the size of the tmpfs to mount in at /dev/shm, in bytes. + // Conflicts with ShmSize if IpcNS is not private. + // Optional. + ShmSize *int64 `json:"shm_size,omitempty"` } // PodCgroupConfig contains configuration options about a pod's cgroups. @@ -203,6 +211,9 @@ type PodSpecGenerator struct { PodStorageConfig PodSecurityConfig InfraContainerSpec *SpecGenerator `json:"-"` + + // The ID of the pod's service container. + ServiceContainerID string `json:"serviceContainerID,omitempty"` } type PodResourceConfig struct { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/resources_freebsd.go b/vendor/github.com/containers/podman/v4/pkg/specgen/resources_freebsd.go new file mode 100644 index 00000000000..49e5976bb7c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/resources_freebsd.go @@ -0,0 +1,8 @@ +package specgen + +import ( + "github.com/containers/common/pkg/config" +) + +func (s *SpecGenerator) InitResourceLimits(rtc *config.Config) { +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/resources_linux.go b/vendor/github.com/containers/podman/v4/pkg/specgen/resources_linux.go new file mode 100644 index 00000000000..ffa9e5786f0 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/resources_linux.go @@ -0,0 +1,22 @@ +package specgen + +import ( + "github.com/containers/common/pkg/config" + spec "github.com/opencontainers/runtime-spec/specs-go" +) + +func (s *SpecGenerator) InitResourceLimits(rtc *config.Config) { + if s.ResourceLimits == nil || s.ResourceLimits.Pids == nil { + if s.CgroupsMode != "disabled" { + limit := rtc.PidsLimit() + if limit != 0 { + if s.ResourceLimits == nil { + s.ResourceLimits = &spec.LinuxResources{} + } + s.ResourceLimits.Pids = &spec.LinuxPids{ + Limit: limit, + } + } + } + } +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go index 79e20667b12..2e7078115ef 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go @@ -1,6 +1,7 @@ package specgen import ( + "errors" "net" "strings" "syscall" @@ -8,12 +9,13 @@ import ( "github.com/containers/common/libimage" nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/image/v5/manifest" + "github.com/containers/podman/v4/libpod/define" "github.com/containers/storage/types" spec 
"github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) -// LogConfig describes the logging characteristics for a container +// LogConfig describes the logging characteristics for a container +// swagger:model LogConfigLibpod type LogConfig struct { // LogDriver is the container's log driver. // Optional. @@ -103,6 +105,12 @@ type ContainerBasicConfig struct { // RawImageName is the user-specified and unprocessed input referring // to a local or a remote image. RawImageName string `json:"raw_image_name,omitempty"` + // ImageOS is the user-specified image OS + ImageOS string `json:"image_os,omitempty"` + // ImageArch is the user-specified image architecture + ImageArch string `json:"image_arch,omitempty"` + // ImageVariant is the user-specified image variant + ImageVariant string `json:"image_variant,omitempty"` // RestartPolicy is the container's restart policy - an action which // will be taken when the container exits. // If not given, the default policy, which does nothing, will be used. @@ -197,6 +205,9 @@ type ContainerBasicConfig struct { // The execution domain system allows Linux to provide limited support // for binaries compiled under other UNIX-like operating systems. Personality *spec.LinuxPersonality `json:"personality,omitempty"` + // EnvMerge takes the specified environment variables from image and preprocess them before injecting them into the + // container. + EnvMerge []string `json:"envmerge,omitempty"` // UnsetEnv unsets the specified default environment variables from the image or from buildin or containers.conf // Optional. UnsetEnv []string `json:"unsetenv,omitempty"` @@ -373,6 +384,10 @@ type ContainerSecurityConfig struct { // ReadOnlyFilesystem indicates that everything will be mounted // as read-only ReadOnlyFilesystem bool `json:"read_only_filesystem,omitempty"` + // ReadWriteTmpfs indicates that when running with a ReadOnlyFilesystem + // mount temporary file systems + ReadWriteTmpfs bool `json:"read_write_tmpfs,omitempty"` + // Umask is the umask the init process of the container will be run with. Umask string `json:"umask,omitempty"` // ProcOpts are the options used for the proc mount. @@ -410,7 +425,7 @@ type ContainerNetworkConfig struct { // Mandatory. NetNS Namespace `json:"netns,omitempty"` // PortBindings is a set of ports to map into the container. - // Only available if NetNS is set to bridge or slirp. + // Only available if NetNS is set to bridge, slirp, or pasta. // Optional. PortMappings []nettypes.PortMapping `json:"portmappings,omitempty"` // PublishExposedPorts will publish ports specified in the image to @@ -523,7 +538,12 @@ type ContainerResourceConfig struct { // ContainerHealthCheckConfig describes a container healthcheck with attributes // like command, retries, interval, start period, and timeout. type ContainerHealthCheckConfig struct { - HealthConfig *manifest.Schema2HealthConfig `json:"healthconfig,omitempty"` + HealthConfig *manifest.Schema2HealthConfig `json:"healthconfig,omitempty"` + HealthCheckOnFailureAction define.HealthCheckOnFailureAction `json:"health_check_on_failure_action,omitempty"` + // Startup healthcheck for a container. + // Requires that HealthConfig be set. + // Optional. 
+ StartupHealthConfig *define.StartupHealthCheck `json:"startupHealthConfig,omitempty"` } // SpecGenerator creates an OCI spec and Libpod configuration options to create @@ -565,10 +585,12 @@ type Secret struct { var ( // ErrNoStaticIPRootless is used when a rootless user requests to assign a static IP address // to a pod or container - ErrNoStaticIPRootless error = errors.New("rootless containers and pods cannot be assigned static IP addresses") + ErrNoStaticIPRootless = errors.New("rootless containers and pods cannot be assigned static IP addresses") // ErrNoStaticMACRootless is used when a rootless user requests to assign a static MAC address // to a pod or container - ErrNoStaticMACRootless error = errors.New("rootless containers and pods cannot be assigned static MAC addresses") + ErrNoStaticMACRootless = errors.New("rootless containers and pods cannot be assigned static MAC addresses") + // Multiple volume mounts to the same destination are not allowed + ErrDuplicateDest = errors.New("duplicate mount destination") ) // NewSpecGenerator returns a SpecGenerator struct given one of two mandatory inputs @@ -595,3 +617,15 @@ func NewSpecGeneratorWithRootfs(rootfs string) *SpecGenerator { csc := ContainerStorageConfig{Rootfs: rootfs} return &SpecGenerator{ContainerStorageConfig: csc} } + +func StringSlicesEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/utils.go b/vendor/github.com/containers/podman/v4/pkg/specgen/utils.go new file mode 100644 index 00000000000..dc9127bb318 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/utils.go @@ -0,0 +1,14 @@ +//go:build !linux
+// +build !linux + +package specgen + +// FinishThrottleDevices cannot be called on non-Linux OSes due to importing unix functions +func FinishThrottleDevices(s *SpecGenerator) error { + return nil +} + +// WeightDevices cannot be called on non-Linux OSes due to importing unix functions +func WeightDevices(s *SpecGenerator) error { + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/utils_linux.go b/vendor/github.com/containers/podman/v4/pkg/specgen/utils_linux.go new file mode 100644 index 00000000000..d8e4cbae378 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/utils_linux.go @@ -0,0 +1,103 @@ +//go:build linux
+// +build linux + +package specgen + +import ( + "fmt" + + spec "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +// FinishThrottleDevices takes the temporary representation of the throttle +// devices in the specgen and looks up the major and minor numbers. It then +// sets the throttle devices proper in the specgen +func FinishThrottleDevices(s *SpecGenerator) error { + if s.ResourceLimits == nil { + s.ResourceLimits = &spec.LinuxResources{} + } + if bps := s.ThrottleReadBpsDevice; len(bps) > 0 { + if s.ResourceLimits.BlockIO == nil { + s.ResourceLimits.BlockIO = &spec.LinuxBlockIO{} + } + for k, v := range bps { + statT := unix.Stat_t{} + if err := unix.Stat(k, &statT); err != nil { + return fmt.Errorf("could not parse throttle device at %s: %w", k, err) + } + v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert + v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert + if s.ResourceLimits.BlockIO == nil { + s.ResourceLimits.BlockIO = new(spec.LinuxBlockIO) + } + s.ResourceLimits.BlockIO.ThrottleReadBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleReadBpsDevice, v) + } + } + if bps := s.ThrottleWriteBpsDevice; len(bps) > 0 { + if s.ResourceLimits.BlockIO == nil { + s.ResourceLimits.BlockIO = &spec.LinuxBlockIO{} + } + for k, v := range bps { + statT := unix.Stat_t{} + if err := unix.Stat(k, &statT); err != nil { + return fmt.Errorf("could not parse throttle device at %s: %w", k, err) + } + v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert + v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert + s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice, v) + } + } + if iops := s.ThrottleReadIOPSDevice; len(iops) > 0 { + if s.ResourceLimits.BlockIO == nil { + s.ResourceLimits.BlockIO = &spec.LinuxBlockIO{} + } + for k, v := range iops { + statT := unix.Stat_t{} + if err := unix.Stat(k, &statT); err != nil { + return fmt.Errorf("could not parse throttle device at %s: %w", k, err) + } + v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert + v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert + s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice, v) + } + } + if iops := s.ThrottleWriteIOPSDevice; len(iops) > 0 { + if s.ResourceLimits.BlockIO == nil { + s.ResourceLimits.BlockIO = &spec.LinuxBlockIO{} + } + for k, v := range iops { + statT := unix.Stat_t{} + if err := unix.Stat(k, &statT); err != nil { + return fmt.Errorf("could not parse throttle device at %s: %w", k, err) + } + v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert + v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert + s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice, v) + } + } + return nil +} + +func WeightDevices(specgen *SpecGenerator) error { + devs := []spec.LinuxWeightDevice{} + if specgen.ResourceLimits == nil { + specgen.ResourceLimits = &spec.LinuxResources{} + } + for k, v := range specgen.WeightDevice { + statT := unix.Stat_t{} + if err := unix.Stat(k, &statT); err != nil { + return fmt.Errorf("failed to inspect '%s' in --blkio-weight-device: %w", k, err) + } + dev := new(spec.LinuxWeightDevice) + dev.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert + dev.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert + dev.Weight = v.Weight + devs = append(devs, *dev) + if specgen.ResourceLimits.BlockIO == nil { + specgen.ResourceLimits.BlockIO = &spec.LinuxBlockIO{} + } + specgen.ResourceLimits.BlockIO.WeightDevice = devs + } + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go 
b/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go index b26666df3db..b8b2ece8bd9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go @@ -1,11 +1,14 @@ package specgen import ( + "errors" + "fmt" + "path/filepath" "strings" "github.com/containers/common/pkg/parse" + "github.com/containers/podman/v4/libpod/define" spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -21,9 +24,14 @@ type NamedVolume struct { Dest string // Options are options that the named volume will be mounted with. Options []string + // IsAnonymous sets the named volume as anonymous even if it has a name + // This is used for emptyDir volumes from a kube yaml + IsAnonymous bool + // SubPath stores the sub directory of the named volume to be mounted in the container + SubPath string } -// OverlayVolume holds information about a overlay volume that will be mounted into +// OverlayVolume holds information about an overlay volume that will be mounted into // the container. type OverlayVolume struct { // Destination is the absolute path where the mount will be placed in the container. @@ -36,7 +44,7 @@ type OverlayVolume struct { // ImageVolume is a volume based on a container image. The container image is // first mounted on the host and is then bind-mounted into the container. An -// ImageVolume is always mounted read only. +// ImageVolume is always mounted read-only. type ImageVolume struct { // Source is the source of the image volume. The image can be referred // to by name and by ID. @@ -49,14 +57,11 @@ type ImageVolume struct { // GenVolumeMounts parses user input into mounts, volumes and overlay volumes func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*NamedVolume, map[string]*OverlayVolume, error) { - errDuplicateDest := errors.Errorf("duplicate mount destination") - mounts := make(map[string]spec.Mount) volumes := make(map[string]*NamedVolume) overlayVolumes := make(map[string]*OverlayVolume) - volumeFormatErr := errors.Errorf("incorrect volume format, should be [host-dir:]ctr-dir[:option]") - + volumeFormatErr := errors.New("incorrect volume format, should be [host-dir:]ctr-dir[:option]") for _, vol := range volumeFlag { var ( options []string @@ -67,10 +72,24 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na splitVol := SplitVolumeString(vol) if len(splitVol) > 3 { - return nil, nil, nil, errors.Wrapf(volumeFormatErr, vol) + return nil, nil, nil, fmt.Errorf("%v: %w", vol, volumeFormatErr) } src = splitVol[0] + + // Support relative paths beginning with ./ + if strings.HasPrefix(src, "./") { + path, err := filepath.EvalSymlinks(src) + if err != nil { + return nil, nil, nil, err + } + src, err = filepath.Abs(path) + if err != nil { + return nil, nil, nil, err + } + splitVol[0] = src + } + if len(splitVol) == 1 { // This is an anonymous named volume. Only thing given // is destination. 
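GenVolumeMounts now resolves volume sources that begin with ./ before classifying the mount, as the hunk above shows. A standalone approximation of just that step (resolveVolumeSource is a hypothetical name, not the vendored function):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// resolveVolumeSource mirrors the new "./" handling: resolve symlinks first
// (which also verifies the path exists on the host), then make the result
// absolute, so relative values never reach the mount logic.
func resolveVolumeSource(src string) (string, error) {
	if !strings.HasPrefix(src, "./") {
		return src, nil
	}
	path, err := filepath.EvalSymlinks(src)
	if err != nil {
		return "", err
	}
	return filepath.Abs(path)
}

func main() {
	src, err := resolveVolumeSource("./data") // "./data" must exist on the host
	fmt.Println(src, err)
}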
@@ -97,6 +116,8 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na // This is not a named volume overlayFlag := false chownFlag := false + upperDirFlag := false + workDirFlag := false for _, o := range options { if o == "O" { overlayFlag = true @@ -105,32 +126,55 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na if strings.Contains(joinedOpts, "U") { chownFlag = true } - - if len(options) > 2 || (len(options) == 2 && !chownFlag) { + if strings.Contains(joinedOpts, "upperdir") { + upperDirFlag = true + } + if strings.Contains(joinedOpts, "workdir") { + workDirFlag = true + } + if (workDirFlag && !upperDirFlag) || (!workDirFlag && upperDirFlag) { + return nil, nil, nil, errors.New("must set both `upperdir` and `workdir`") + } + if len(options) > 2 && !(len(options) == 3 && upperDirFlag && workDirFlag) || (len(options) == 2 && !chownFlag) { return nil, nil, nil, errors.New("can't use 'O' with other options") } } } if overlayFlag { - // This is a overlay volume + // This is an overlay volume newOverlayVol := new(OverlayVolume) newOverlayVol.Destination = dest - newOverlayVol.Source = src + // convert src to absolute path so we don't end up passing + // relative values as lowerdir for overlay mounts + source, err := filepath.Abs(src) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed while resolving absolute path for source %v for overlay mount: %w", src, err) + } + newOverlayVol.Source = source newOverlayVol.Options = options - if _, ok := overlayVolumes[newOverlayVol.Destination]; ok { - return nil, nil, nil, errors.Wrapf(errDuplicateDest, newOverlayVol.Destination) + if vol, ok := overlayVolumes[newOverlayVol.Destination]; ok { + if vol.Source == newOverlayVol.Source && + StringSlicesEqual(vol.Options, newOverlayVol.Options) { + continue + } + return nil, nil, nil, fmt.Errorf("%v: %w", newOverlayVol.Destination, ErrDuplicateDest) } overlayVolumes[newOverlayVol.Destination] = newOverlayVol } else { newMount := spec.Mount{ Destination: dest, - Type: "bind", + Type: define.TypeBind, Source: src, Options: options, } - if _, ok := mounts[newMount.Destination]; ok { - return nil, nil, nil, errors.Wrapf(errDuplicateDest, newMount.Destination) + if vol, ok := mounts[newMount.Destination]; ok { + if vol.Source == newMount.Source && + StringSlicesEqual(vol.Options, newMount.Options) { + continue + } + + return nil, nil, nil, fmt.Errorf("%v: %w", newMount.Destination, ErrDuplicateDest) } mounts[newMount.Destination] = newMount } @@ -141,8 +185,11 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na newNamedVol.Dest = dest newNamedVol.Options = options - if _, ok := volumes[newNamedVol.Dest]; ok { - return nil, nil, nil, errors.Wrapf(errDuplicateDest, newNamedVol.Dest) + if vol, ok := volumes[newNamedVol.Dest]; ok { + if vol.Name == newNamedVol.Name { + continue + } + return nil, nil, nil, fmt.Errorf("%v: %w", newNamedVol.Dest, ErrDuplicateDest) } volumes[newNamedVol.Dest] = newNamedVol } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go index 0df4ebdd7b7..5c19aeb4b23 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go @@ -1,11 +1,10 @@ package specgen import ( + "errors" "fmt" "strings" "unicode" - - "github.com/pkg/errors" ) func isHostWinPath(path string) bool { diff --git 
a/vendor/github.com/containers/podman/v4/pkg/terminal/console_unix.go b/vendor/github.com/containers/podman/v4/pkg/terminal/console_unix.go deleted file mode 100644 index 53290be2492..00000000000 --- a/vendor/github.com/containers/podman/v4/pkg/terminal/console_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !windows -// +build !windows - -package terminal - -// SetConsole for non-windows environments is a no-op. -func SetConsole() error { - return nil -} diff --git a/vendor/github.com/containers/podman/v4/pkg/terminal/console_windows.go b/vendor/github.com/containers/podman/v4/pkg/terminal/console_windows.go deleted file mode 100644 index 1a7da333511..00000000000 --- a/vendor/github.com/containers/podman/v4/pkg/terminal/console_windows.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build windows -// +build windows - -package terminal - -import ( - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -// SetConsole switches the windows terminal mode to be able to handle colors, etc -func SetConsole() error { - if err := setConsoleMode(windows.Stdout, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err != nil { - return err - } - if err := setConsoleMode(windows.Stderr, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err != nil { - return err - } - if err := setConsoleMode(windows.Stdin, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err != nil { - return err - } - return nil -} - -func setConsoleMode(handle windows.Handle, flags uint32) error { - var mode uint32 - err := windows.GetConsoleMode(handle, &mode) - if err != nil { - return nil // not a terminal - } - if err := windows.SetConsoleMode(handle, mode|flags); err != nil { - // In similar code, it is not considered an error if we cannot set the - // console mode. Following same line of thinking here. - logrus.WithError(err).Debug("Failed to set console mode for cli") - } - - return nil -} diff --git a/vendor/github.com/containers/podman/v4/pkg/terminal/util.go b/vendor/github.com/containers/podman/v4/pkg/terminal/util.go deleted file mode 100644 index 0f0968c30b4..00000000000 --- a/vendor/github.com/containers/podman/v4/pkg/terminal/util.go +++ /dev/null @@ -1,134 +0,0 @@ -package terminal - -import ( - "bufio" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/containers/storage/pkg/homedir" - "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/knownhosts" - "golang.org/x/term" -) - -var ( - passPhrase []byte - phraseSync sync.Once - password []byte - passwordSync sync.Once -) - -// ReadPassword prompts for a secret and returns value input by user from stdin -// Unlike terminal.ReadPassword(), $(echo $SECRET | podman...) is supported. -// Additionally, all input after `/n` is queued to podman command. 
-func ReadPassword(prompt string) (pw []byte, err error) { - fd := int(os.Stdin.Fd()) - if term.IsTerminal(fd) { - fmt.Fprint(os.Stderr, prompt) - pw, err = term.ReadPassword(fd) - fmt.Fprintln(os.Stderr) - return - } - - var b [1]byte - for { - n, err := os.Stdin.Read(b[:]) - // terminal.ReadPassword discards any '\r', so we do the same - if n > 0 && b[0] != '\r' { - if b[0] == '\n' { - return pw, nil - } - pw = append(pw, b[0]) - // limit size, so that a wrong input won't fill up the memory - if len(pw) > 1024 { - err = errors.New("password too long, 1024 byte limit") - } - } - if err != nil { - // terminal.ReadPassword accepts EOF-terminated passwords - // if non-empty, so we do the same - if err == io.EOF && len(pw) > 0 { - err = nil - } - return pw, err - } - } -} - -func PublicKey(path string, passphrase []byte) (ssh.Signer, error) { - key, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - signer, err := ssh.ParsePrivateKey(key) - if err != nil { - if _, ok := err.(*ssh.PassphraseMissingError); !ok { - return nil, err - } - if len(passphrase) == 0 { - passphrase = ReadPassphrase() - } - return ssh.ParsePrivateKeyWithPassphrase(key, passphrase) - } - return signer, nil -} - -func ReadPassphrase() []byte { - phraseSync.Do(func() { - secret, err := ReadPassword("Key Passphrase: ") - if err != nil { - secret = []byte{} - } - passPhrase = secret - }) - return passPhrase -} - -func ReadLogin() []byte { - passwordSync.Do(func() { - secret, err := ReadPassword("Login password: ") - if err != nil { - secret = []byte{} - } - password = secret - }) - return password -} - -func HostKey(host string) ssh.PublicKey { - // parse OpenSSH known_hosts file - // ssh or use ssh-keyscan to get initial key - knownHosts := filepath.Join(homedir.Get(), ".ssh", "known_hosts") - fd, err := os.Open(knownHosts) - if err != nil { - logrus.Error(err) - return nil - } - - // support -H parameter for ssh-keyscan - hashhost := knownhosts.HashHostname(host) - - scanner := bufio.NewScanner(fd) - for scanner.Scan() { - _, hosts, key, _, _, err := ssh.ParseKnownHosts(scanner.Bytes()) - if err != nil { - logrus.Errorf("Failed to parse known_hosts: %s", scanner.Text()) - continue - } - - for _, h := range hosts { - if h == host || h == hashhost { - return key - } - } - } - - return nil -} diff --git a/vendor/github.com/containers/podman/v4/pkg/timetype/timestamp.go b/vendor/github.com/containers/podman/v4/pkg/timetype/timestamp.go index 5e9c6a1591d..4defa32ea6d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/timetype/timestamp.go +++ b/vendor/github.com/containers/podman/v4/pkg/timetype/timestamp.go @@ -103,8 +103,10 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // if the incoming nanosecond portion is longer or shorter than 9 digits it is // converted to nanoseconds. The expectation is that the seconds and // seconds will be used to create a time variable. 
For example: -// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) -// if err == nil since := time.Unix(seconds, nanoseconds) +// +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// // returns seconds as def(aultSeconds) if value == "" func ParseTimestamps(value string, def int64) (int64, int64, error) { if value == "" { diff --git a/vendor/github.com/containers/podman/v4/pkg/trust/config.go b/vendor/github.com/containers/podman/v4/pkg/trust/config.go deleted file mode 100644 index 6186d4cbd19..00000000000 --- a/vendor/github.com/containers/podman/v4/pkg/trust/config.go +++ /dev/null @@ -1,12 +0,0 @@ -package trust - -// Policy describes a basic trust policy configuration -type Policy struct { - Transport string `json:"transport"` - Name string `json:"name,omitempty"` - RepoName string `json:"repo_name,omitempty"` - Keys []string `json:"keys,omitempty"` - SignatureStore string `json:"sigstore,omitempty"` - Type string `json:"type"` - GPGId string `json:"gpg_id,omitempty"` -} diff --git a/vendor/github.com/containers/podman/v4/pkg/trust/policy.go b/vendor/github.com/containers/podman/v4/pkg/trust/policy.go new file mode 100644 index 00000000000..aa14fc7e152 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/trust/policy.go @@ -0,0 +1,248 @@ +package trust + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +// policyContent is the overall structure of a policy.json file (= c/image/v5/signature.Policy) +type policyContent struct { + Default []repoContent `json:"default"` + Transports transportsContent `json:"transports,omitempty"` +} + +// transportsContent contains policies for individual transports (= c/image/v5/signature.Policy.Transports) +type transportsContent map[string]repoMap + +// repoMap maps a scope name to requirements that apply to that scope (= c/image/v5/signature.PolicyTransportScopes) +type repoMap map[string][]repoContent + +// repoContent is a single policy requirement (one of possibly several for a scope), representing all of the individual alternatives in a single merged struct +// (= c/image/v5/signature.{PolicyRequirement,pr*}) +type repoContent struct { + Type string `json:"type"` + KeyType string `json:"keyType,omitempty"` + KeyPath string `json:"keyPath,omitempty"` + KeyPaths []string `json:"keyPaths,omitempty"` + KeyData string `json:"keyData,omitempty"` + SignedIdentity json.RawMessage `json:"signedIdentity,omitempty"` +} + +// genericPolicyContent is the overall structure of a policy.json file (= c/image/v5/signature.Policy), using generic data for individual requirements. +type genericPolicyContent struct { + Default json.RawMessage `json:"default"` + Transports genericTransportsContent `json:"transports,omitempty"` +} + +// genericTransportsContent contains policies for individual transports (= c/image/v5/signature.Policy.Transports), using generic data for individual requirements. +type genericTransportsContent map[string]genericRepoMap + +// genericRepoMap maps a scope name to requirements that apply to that scope (= c/image/v5/signature.PolicyTransportScopes) +type genericRepoMap map[string]json.RawMessage + +// DefaultPolicyPath returns a path to the default policy of the system. 
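(The function that follows resolves the policy path in a fixed order: an explicit sys.SignaturePolicyPath wins, then RootForImplicitAbsolutePaths is prefixed onto the built-in default, otherwise the built-in default from containers/common is returned, typically /etc/containers/policy.json. A minimal sketch of the observable behavior; the /srv/root and /tmp paths are illustrative only:)

	package main

	import (
		"fmt"

		"github.com/containers/image/v5/types"
		"github.com/containers/podman/v4/pkg/trust"
	)

	func main() {
		// An explicit policy path always wins.
		sys := &types.SystemContext{SignaturePolicyPath: "/tmp/policy.json"}
		fmt.Println(trust.DefaultPolicyPath(sys)) // /tmp/policy.json

		// An implicit root gets the system default joined onto it.
		sys = &types.SystemContext{RootForImplicitAbsolutePaths: "/srv/root"}
		fmt.Println(trust.DefaultPolicyPath(sys)) // e.g. /srv/root/etc/containers/policy.json
	}
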
+func DefaultPolicyPath(sys *types.SystemContext) string { + systemDefaultPolicyPath := config.DefaultSignaturePolicyPath + if sys != nil { + if sys.SignaturePolicyPath != "" { + return sys.SignaturePolicyPath + } + if sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) + } + } + return systemDefaultPolicyPath +} + +// gpgIDReader returns GPG key IDs of keys stored at the provided path. +// It exists only for tests; production code should always use getGPGIdFromKeyPath. +type gpgIDReader func(string) []string + +// createTmpFile creates a temp file under dir and writes the content into it +func createTmpFile(dir, pattern string, content []byte) (string, error) { + tmpfile, err := os.CreateTemp(dir, pattern) + if err != nil { + return "", err + } + defer tmpfile.Close() + + if _, err := tmpfile.Write(content); err != nil { + return "", err + } + return tmpfile.Name(), nil +} + +// getGPGIdFromKeyPath returns GPG key IDs of keys stored at the provided path. +func getGPGIdFromKeyPath(path string) []string { + cmd := exec.Command("gpg2", "--with-colons", path) + results, err := cmd.Output() + if err != nil { + logrus.Errorf("Getting key identity: %s", err) + return nil + } + return parseUids(results) +} + +// getGPGIdFromKeyData returns GPG key IDs of keys in the provided keyring. +func getGPGIdFromKeyData(idReader gpgIDReader, key string) []string { + decodeKey, err := base64.StdEncoding.DecodeString(key) + if err != nil { + logrus.Errorf("%s, error decoding key data", err) + return nil + } + tmpfileName, err := createTmpFile("", "", decodeKey) + if err != nil { + logrus.Errorf("Creating key data temp file %s", err) + } + defer os.Remove(tmpfileName) + return idReader(tmpfileName) +} + +func parseUids(colonDelimitKeys []byte) []string { + var parseduids []string + scanner := bufio.NewScanner(bytes.NewReader(colonDelimitKeys)) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "uid:") || strings.HasPrefix(line, "pub:") { + uid := strings.Split(line, ":")[9] + if uid == "" { + continue + } + parseduid := uid + if strings.Contains(uid, "<") && strings.Contains(uid, ">") { + parseduid = strings.SplitN(strings.SplitAfterN(uid, "<", 2)[1], ">", 2)[0] + } + parseduids = append(parseduids, parseduid) + } + } + return parseduids +} + +// getPolicy parses policy.json into policyContent. +func getPolicy(policyPath string) (policyContent, error) { + var policyContentStruct policyContent + policyContent, err := os.ReadFile(policyPath) + if err != nil { + return policyContentStruct, fmt.Errorf("unable to read policy file: %w", err) + } + if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil { + return policyContentStruct, fmt.Errorf("could not parse trust policies from %s: %w", policyPath, err) + } + return policyContentStruct, nil +} + +var typeDescription = map[string]string{"insecureAcceptAnything": "accept", "signedBy": "signed", "sigstoreSigned": "sigstoreSigned", "reject": "reject"} + +func trustTypeDescription(trustType string) string { + trustDescription, exist := typeDescription[trustType] + if !exist { + logrus.Warnf("Invalid trust type %s", trustType) + } + return trustDescription +} + +// AddPolicyEntriesInput collects some parameters to AddPolicyEntries, +// primarily so that the callers use named values instead of just strings in a sequence. 
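(A sketch of how the input struct declared next is typically driven, per the AddPolicyEntries implementation further down; the scope, key file, and policy path are illustrative values, not defaults:)

	package main

	import (
		"log"

		"github.com/containers/podman/v4/pkg/trust"
	)

	func main() {
		// Require GPG signatures from one key for a single repository scope.
		input := trust.AddPolicyEntriesInput{
			Scope:       "docker.io/library/example", // hypothetical scope
			Type:        "signedBy",
			PubKeyFiles: []string{"/etc/pki/example.gpg"}, // hypothetical key file
		}
		if err := trust.AddPolicyEntries("/etc/containers/policy.json", input); err != nil {
			log.Fatal(err)
		}
	}
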
+type AddPolicyEntriesInput struct { + Scope string // "default" or a docker/atomic scope name + Type string + PubKeyFiles []string // For signature enforcement types, paths to public keys files (where the image needs to be signed by at least one key from _each_ of the files). File format depends on Type. +} + +// AddPolicyEntries adds one or more policy entries necessary to implement AddPolicyEntriesInput. +func AddPolicyEntries(policyPath string, input AddPolicyEntriesInput) error { + var ( + policyContentStruct genericPolicyContent + newReposContent []repoContent + ) + trustType := input.Type + if trustType == "accept" { + trustType = "insecureAcceptAnything" + } + pubkeysfile := input.PubKeyFiles + + // The error messages in validation failures use input.Type instead of trustType to match the user’s input. + switch trustType { + case "insecureAcceptAnything", "reject": + if len(pubkeysfile) != 0 { + return fmt.Errorf("%d public keys unexpectedly provided for trust type %v", len(pubkeysfile), input.Type) + } + newReposContent = append(newReposContent, repoContent{Type: trustType}) + + case "signedBy": + if len(pubkeysfile) == 0 { + return errors.New("at least one public key must be defined for type 'signedBy'") + } + for _, filepath := range pubkeysfile { + newReposContent = append(newReposContent, repoContent{Type: trustType, KeyType: "GPGKeys", KeyPath: filepath}) + } + + case "sigstoreSigned": + if len(pubkeysfile) == 0 { + return errors.New("at least one public key must be defined for type 'sigstoreSigned'") + } + for _, filepath := range pubkeysfile { + newReposContent = append(newReposContent, repoContent{Type: trustType, KeyPath: filepath}) + } + + default: + return fmt.Errorf("unknown trust type %q", input.Type) + } + newReposJSON, err := json.Marshal(newReposContent) + if err != nil { + return err + } + + _, err = os.Stat(policyPath) + if !os.IsNotExist(err) { + policyContent, err := os.ReadFile(policyPath) + if err != nil { + return err + } + if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil { + return errors.New("could not read trust policies") + } + } + if input.Scope == "default" { + policyContentStruct.Default = json.RawMessage(newReposJSON) + } else { + if len(policyContentStruct.Default) == 0 { + return errors.New("default trust policy must be set") + } + registryExists := false + for transport, transportval := range policyContentStruct.Transports { + _, registryExists = transportval[input.Scope] + if registryExists { + policyContentStruct.Transports[transport][input.Scope] = json.RawMessage(newReposJSON) + break + } + } + if !registryExists { + if policyContentStruct.Transports == nil { + policyContentStruct.Transports = make(map[string]genericRepoMap) + } + if policyContentStruct.Transports["docker"] == nil { + policyContentStruct.Transports["docker"] = make(map[string]json.RawMessage) + } + policyContentStruct.Transports["docker"][input.Scope] = json.RawMessage(newReposJSON) + } + } + + data, err := json.MarshalIndent(policyContentStruct, "", " ") + if err != nil { + return fmt.Errorf("setting trust policy: %w", err) + } + return os.WriteFile(policyPath, data, 0644) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/trust/registries.go b/vendor/github.com/containers/podman/v4/pkg/trust/registries.go new file mode 100644 index 00000000000..ed7bca1d61e --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/trust/registries.go @@ -0,0 +1,125 @@ +package trust + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + 
"github.com/containers/image/v5/types" + "github.com/docker/docker/pkg/homedir" + "github.com/ghodss/yaml" +) + +// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. +// NOTE: Keep this in sync with docs/registries.d.md! +type registryConfiguration struct { + DefaultDocker *registryNamespace `json:"default-docker"` + // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), + Docker map[string]registryNamespace `json:"docker"` +} + +// registryNamespace defines lookaside locations for a single namespace. +type registryNamespace struct { + Lookaside string `json:"lookaside"` // For reading, and if LookasideStaging is not present, for writing. + LookasideStaging string `json:"lookaside-staging"` // For writing only. + SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. + SigStoreStaging string `json:"sigstore-staging"` // For writing only. +} + +// systemRegistriesDirPath is the path to registries.d. +const systemRegistriesDirPath = "/etc/containers/registries.d" + +// userRegistriesDir is the path to the per user registries.d. +var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d") + +// RegistriesDirPath returns a path to registries.d +func RegistriesDirPath(sys *types.SystemContext) string { + if sys != nil && sys.RegistriesDirPath != "" { + return sys.RegistriesDirPath + } + userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir) + if _, err := os.Stat(userRegistriesDirPath); err == nil { + return userRegistriesDirPath + } + if sys != nil && sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) + } + + return systemRegistriesDirPath +} + +// loadAndMergeConfig loads registries.d configuration files in dirPath +func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { + mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} + dockerDefaultMergedFrom := "" + nsMergedFrom := map[string]string{} + + dir, err := os.Open(dirPath) + if err != nil { + if os.IsNotExist(err) { + return &mergedConfig, nil + } + return nil, err + } + configNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + for _, configName := range configNames { + if !strings.HasSuffix(configName, ".yaml") { + continue + } + configPath := filepath.Join(dirPath, configName) + configBytes, err := os.ReadFile(configPath) + if err != nil { + return nil, err + } + var config registryConfiguration + err = yaml.Unmarshal(configBytes, &config) + if err != nil { + return nil, fmt.Errorf("parsing %s: %w", configPath, err) + } + if config.DefaultDocker != nil { + if mergedConfig.DefaultDocker != nil { + return nil, fmt.Errorf(`error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, + dockerDefaultMergedFrom, configPath) + } + mergedConfig.DefaultDocker = config.DefaultDocker + dockerDefaultMergedFrom = configPath + } + for nsName, nsConfig := range config.Docker { // includes config.Docker == nil + if _, ok := mergedConfig.Docker[nsName]; ok { + return nil, fmt.Errorf(`error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, + nsName, nsMergedFrom[nsName], configPath) + } + mergedConfig.Docker[nsName] = nsConfig + nsMergedFrom[nsName] = configPath + } + } + return &mergedConfig, nil 
+} + +// registriesDConfigurationForScope returns registries.d configuration for the provided scope. +// scope can be "" to return only the global default configuration entry. +func registriesDConfigurationForScope(registryConfigs *registryConfiguration, scope string) *registryNamespace { + searchScope := scope + if searchScope != "" { + if !strings.Contains(searchScope, "/") { + val, exists := registryConfigs.Docker[searchScope] + if exists { + return &val + } + } + for range strings.Split(scope, "/") { + val, exists := registryConfigs.Docker[searchScope] + if exists { + return &val + } + if strings.Contains(searchScope, "/") { + searchScope = searchScope[:strings.LastIndex(searchScope, "/")] + } + } + } + return registryConfigs.DefaultDocker +} diff --git a/vendor/github.com/containers/podman/v4/pkg/trust/trust.go b/vendor/github.com/containers/podman/v4/pkg/trust/trust.go index 1d0cc61babf..07d144bc112 100644 --- a/vendor/github.com/containers/podman/v4/pkg/trust/trust.go +++ b/vendor/github.com/containers/podman/v4/pkg/trust/trust.go @@ -1,243 +1,127 @@ package trust import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/json" - "io/ioutil" - "os" - "os/exec" - "path/filepath" + "fmt" + "sort" "strings" - - "github.com/containers/image/v5/types" - "github.com/docker/docker/pkg/homedir" - "github.com/ghodss/yaml" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) -// PolicyContent struct for policy.json file -type PolicyContent struct { - Default []RepoContent `json:"default"` - Transports TransportsContent `json:"transports,omitempty"` -} - -// RepoContent struct used under each repo -type RepoContent struct { - Type string `json:"type"` - KeyType string `json:"keyType,omitempty"` - KeyPath string `json:"keyPath,omitempty"` - KeyData string `json:"keyData,omitempty"` - SignedIdentity json.RawMessage `json:"signedIdentity,omitempty"` -} - -// RepoMap map repo name to policycontent for each repo -type RepoMap map[string][]RepoContent - -// TransportsContent struct for content under "transports" -type TransportsContent map[string]RepoMap - -// RegistryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. -// NOTE: Keep this in sync with docs/registries.d.md! -type RegistryConfiguration struct { - DefaultDocker *RegistryNamespace `json:"default-docker"` - // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), - Docker map[string]RegistryNamespace `json:"docker"` +// Policy describes a basic trust policy configuration +type Policy struct { + Transport string `json:"transport"` + Name string `json:"name,omitempty"` + RepoName string `json:"repo_name,omitempty"` + Keys []string `json:"keys,omitempty"` + SignatureStore string `json:"sigstore,omitempty"` + Type string `json:"type"` + GPGId string `json:"gpg_id,omitempty"` } -// RegistryNamespace defines lookaside locations for a single namespace. -type RegistryNamespace struct { - SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. - SigStoreStaging string `json:"sigstore-staging"` // For writing only. +// PolicyDescription returns a user-focused description of the policy in policyPath and registries.d data from registriesDirPath. 
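(Given the declarations below, the expected call pattern is roughly the following; the column formatting is illustrative, not what podman itself prints:)

	package main

	import (
		"fmt"
		"log"

		"github.com/containers/podman/v4/pkg/trust"
	)

	func main() {
		policies, err := trust.PolicyDescription(
			trust.DefaultPolicyPath(nil),
			trust.RegistriesDirPath(nil),
		)
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range policies {
			fmt.Printf("%-30s %-10s %s\n", p.RepoName, p.Type, p.GPGId)
		}
	}
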
+func PolicyDescription(policyPath, registriesDirPath string) ([]*Policy, error) { + return policyDescriptionWithGPGIDReader(policyPath, registriesDirPath, getGPGIdFromKeyPath) } -// ShowOutput keep the fields for image trust show command -type ShowOutput struct { - Repo string - Trusttype string - GPGid string - Sigstore string -} - -// systemRegistriesDirPath is the path to registries.d. -const systemRegistriesDirPath = "/etc/containers/registries.d" - -// userRegistriesDir is the path to the per user registries.d. -var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d") - -// DefaultPolicyPath returns a path to the default policy of the system. -func DefaultPolicyPath(sys *types.SystemContext) string { - systemDefaultPolicyPath := "/etc/containers/policy.json" - if sys != nil { - if sys.SignaturePolicyPath != "" { - return sys.SignaturePolicyPath - } - if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) - } - } - return systemDefaultPolicyPath -} - -// RegistriesDirPath returns a path to registries.d -func RegistriesDirPath(sys *types.SystemContext) string { - if sys != nil && sys.RegistriesDirPath != "" { - return sys.RegistriesDirPath - } - userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir) - if _, err := os.Stat(userRegistriesDirPath); err == nil { - return userRegistriesDirPath +// policyDescriptionWithGPGIDReader is PolicyDescription with a gpgIDReader parameter. It exists only to make testing easier. +func policyDescriptionWithGPGIDReader(policyPath, registriesDirPath string, idReader gpgIDReader) ([]*Policy, error) { + policyContentStruct, err := getPolicy(policyPath) + if err != nil { + return nil, fmt.Errorf("could not read trust policies: %w", err) } - if sys != nil && sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) + res, err := getPolicyShowOutput(policyContentStruct, registriesDirPath, idReader) + if err != nil { + return nil, fmt.Errorf("could not show trust policies: %w", err) } - - return systemRegistriesDirPath + return res, nil } -// LoadAndMergeConfig loads configuration files in dirPath -func LoadAndMergeConfig(dirPath string) (*RegistryConfiguration, error) { - mergedConfig := RegistryConfiguration{Docker: map[string]RegistryNamespace{}} - dockerDefaultMergedFrom := "" - nsMergedFrom := map[string]string{} +func getPolicyShowOutput(policyContentStruct policyContent, systemRegistriesDirPath string, idReader gpgIDReader) ([]*Policy, error) { + var output []*Policy - dir, err := os.Open(dirPath) + registryConfigs, err := loadAndMergeConfig(systemRegistriesDirPath) if err != nil { - if os.IsNotExist(err) { - return &mergedConfig, nil - } return nil, err } - configNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - for _, configName := range configNames { - if !strings.HasSuffix(configName, ".yaml") { - continue - } - configPath := filepath.Join(dirPath, configName) - configBytes, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, err + + if len(policyContentStruct.Default) > 0 { + template := Policy{ + Transport: "all", + Name: "* (default)", + RepoName: "default", } - var config RegistryConfiguration - err = yaml.Unmarshal(configBytes, &config) - if err != nil { - return nil, errors.Wrapf(err, "error parsing %s", configPath) + output = append(output, descriptionsOfPolicyRequirements(policyContentStruct.Default, template, registryConfigs, 
"", idReader)...) + } + // FIXME: This should use x/exp/maps.Keys after we update to Go 1.18. + transports := []string{} + for t := range policyContentStruct.Transports { + transports = append(transports, t) + } + sort.Strings(transports) + for _, transport := range transports { + transval := policyContentStruct.Transports[transport] + if transport == "docker" { + transport = "repository" } - if config.DefaultDocker != nil { - if mergedConfig.DefaultDocker != nil { - return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, - dockerDefaultMergedFrom, configPath) - } - mergedConfig.DefaultDocker = config.DefaultDocker - dockerDefaultMergedFrom = configPath + + // FIXME: This should use x/exp/maps.Keys after we update to Go 1.18. + scopes := []string{} + for s := range transval { + scopes = append(scopes, s) } - for nsName, nsConfig := range config.Docker { // includes config.Docker == nil - if _, ok := mergedConfig.Docker[nsName]; ok { - return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, - nsName, nsMergedFrom[nsName], configPath) + sort.Strings(scopes) + for _, repo := range scopes { + repoval := transval[repo] + template := Policy{ + Transport: transport, + Name: repo, + RepoName: repo, } - mergedConfig.Docker[nsName] = nsConfig - nsMergedFrom[nsName] = configPath + output = append(output, descriptionsOfPolicyRequirements(repoval, template, registryConfigs, repo, idReader)...) } } - return &mergedConfig, nil + return output, nil } -// HaveMatchRegistry checks if trust settings for the registry have been configured in yaml file -func HaveMatchRegistry(key string, registryConfigs *RegistryConfiguration) *RegistryNamespace { - searchKey := key - if !strings.Contains(searchKey, "/") { - val, exists := registryConfigs.Docker[searchKey] - if exists { - return &val - } - } - for range strings.Split(key, "/") { - val, exists := registryConfigs.Docker[searchKey] - if exists { - return &val - } - if strings.Contains(searchKey, "/") { - searchKey = searchKey[:strings.LastIndex(searchKey, "/")] +// descriptionsOfPolicyRequirements turns reqs into user-readable policy entries, with Transport/Name/Reponame coming from template, potentially looking up scope (which may be "") in registryConfigs. +func descriptionsOfPolicyRequirements(reqs []repoContent, template Policy, registryConfigs *registryConfiguration, scope string, idReader gpgIDReader) []*Policy { + res := []*Policy{} + + var lookasidePath string + registryNamespace := registriesDConfigurationForScope(registryConfigs, scope) + if registryNamespace != nil { + if registryNamespace.Lookaside != "" { + lookasidePath = registryNamespace.Lookaside + } else { // incl. 
registryNamespace.SigStore == "" + lookasidePath = registryNamespace.SigStore } } - return registryConfigs.DefaultDocker -} - -// CreateTmpFile creates a temp file under dir and writes the content into it -func CreateTmpFile(dir, pattern string, content []byte) (string, error) { - tmpfile, err := ioutil.TempFile(dir, pattern) - if err != nil { - return "", err - } - defer tmpfile.Close() - - if _, err := tmpfile.Write(content); err != nil { - return "", err - } - return tmpfile.Name(), nil -} - -// GetGPGIdFromKeyPath return user keyring from key path -func GetGPGIdFromKeyPath(path string) []string { - cmd := exec.Command("gpg2", "--with-colons", path) - results, err := cmd.Output() - if err != nil { - logrus.Errorf("Getting key identity: %s", err) - return nil - } - return parseUids(results) -} -// GetGPGIdFromKeyData return user keyring from keydata -func GetGPGIdFromKeyData(key string) []string { - decodeKey, err := base64.StdEncoding.DecodeString(key) - if err != nil { - logrus.Errorf("%s, error decoding key data", err) - return nil - } - tmpfileName, err := CreateTmpFile("", "", decodeKey) - if err != nil { - logrus.Errorf("Creating key date temp file %s", err) - } - defer os.Remove(tmpfileName) - return GetGPGIdFromKeyPath(tmpfileName) -} + for _, repoele := range reqs { + entry := template + entry.Type = trustTypeDescription(repoele.Type) -func parseUids(colonDelimitKeys []byte) []string { - var parseduids []string - scanner := bufio.NewScanner(bytes.NewReader(colonDelimitKeys)) - for scanner.Scan() { - line := scanner.Text() - if strings.HasPrefix(line, "uid:") || strings.HasPrefix(line, "pub:") { - uid := strings.Split(line, ":")[9] - if uid == "" { - continue + var gpgIDString string + switch repoele.Type { + case "signedBy": + uids := []string{} + if len(repoele.KeyPath) > 0 { + uids = append(uids, idReader(repoele.KeyPath)...) } - parseduid := uid - if strings.Contains(uid, "<") && strings.Contains(uid, ">") { - parseduid = strings.SplitN(strings.SplitAfterN(uid, "<", 2)[1], ">", 2)[0] + for _, path := range repoele.KeyPaths { + uids = append(uids, idReader(path)...) } - parseduids = append(parseduids, parseduid) + if len(repoele.KeyData) > 0 { + uids = append(uids, getGPGIdFromKeyData(idReader, repoele.KeyData)...) + } + gpgIDString = strings.Join(uids, ", ") + + case "sigstoreSigned": + gpgIDString = "N/A" // We could potentially return key fingerprints here, but they would not be _GPG_ fingerprints. } + entry.GPGId = gpgIDString + entry.SignatureStore = lookasidePath // We do this even for sigstoreSigned and things like type: reject, to show that the sigstore is being read. 
+ res = append(res, &entry) } - return parseduids -} -// GetPolicy parse policy.json into PolicyContent struct -func GetPolicy(policyPath string) (PolicyContent, error) { - var policyContentStruct PolicyContent - policyContent, err := ioutil.ReadFile(policyPath) - if err != nil { - return policyContentStruct, errors.Wrap(err, "unable to read policy file") - } - if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil { - return policyContentStruct, errors.Wrapf(err, "could not parse trust policies from %s", policyPath) - } - return policyContentStruct, nil + return res } diff --git a/vendor/github.com/containers/podman/v4/pkg/util/filters.go b/vendor/github.com/containers/podman/v4/pkg/util/filters.go index 05ba4f82cdb..104b9c3c2d8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/filters.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/filters.go @@ -2,21 +2,20 @@ package util import ( "encoding/json" + "errors" "fmt" "net/http" - "path/filepath" "strings" "time" "github.com/containers/podman/v4/pkg/timetype" - "github.com/pkg/errors" ) // ComputeUntilTimestamp extracts until timestamp from filters func ComputeUntilTimestamp(filterValues []string) (time.Time, error) { invalid := time.Time{} if len(filterValues) != 1 { - return invalid, errors.Errorf("specify exactly one timestamp for until") + return invalid, errors.New("specify exactly one timestamp for until") } ts, err := timetype.GetTimestamp(filterValues[0], time.Now()) if err != nil { @@ -94,35 +93,3 @@ func PrepareFilters(r *http.Request) (*map[string][]string, error) { } return &filterMap, nil } - -func matchPattern(pattern string, value string) bool { - if strings.Contains(pattern, "*") { - filter := fmt.Sprintf("*%s*", pattern) - filter = strings.ReplaceAll(filter, string(filepath.Separator), "|") - newName := strings.ReplaceAll(value, string(filepath.Separator), "|") - match, _ := filepath.Match(filter, newName) - return match - } - return false -} - -// MatchLabelFilters matches labels and returns true if they are valid -func MatchLabelFilters(filterValues []string, labels map[string]string) bool { -outer: - for _, filterValue := range filterValues { - filterArray := strings.SplitN(filterValue, "=", 2) - filterKey := filterArray[0] - if len(filterArray) > 1 { - filterValue = filterArray[1] - } else { - filterValue = "" - } - for labelKey, labelValue := range labels { - if ((labelKey == filterKey) || matchPattern(filterKey, labelKey)) && (filterValue == "" || labelValue == filterValue) { - continue outer - } - } - return false - } - return true -} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/kube.go b/vendor/github.com/containers/podman/v4/pkg/util/kube.go index 1255cdfc59b..1a70ed05188 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/kube.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/kube.go @@ -13,4 +13,6 @@ const ( VolumeGIDAnnotation = "volume.podman.io/gid" // Kube annotation for podman volume mount options. VolumeMountOptsAnnotation = "volume.podman.io/mount-options" + // Kube annotation for podman volume import source. 
+ VolumeImportSourceAnnotation = "volume.podman.io/import-source" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go index e37394619e3..49fd5b4679c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go @@ -1,16 +1,16 @@ package util import ( + "errors" + "fmt" "strings" - - "github.com/pkg/errors" ) var ( // ErrBadMntOption indicates that an invalid mount option was passed. - ErrBadMntOption = errors.Errorf("invalid mount option") + ErrBadMntOption = errors.New("invalid mount option") // ErrDupeMntOption indicates that a duplicate mount option was passed. - ErrDupeMntOption = errors.Errorf("duplicate mount option passed") + ErrDupeMntOption = errors.New("duplicate mount option passed") ) type defaultMountOptions struct { @@ -25,7 +25,7 @@ type defaultMountOptions struct { // The sourcePath variable, if not empty, contains a bind mount source. func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string, error) { var ( - foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap bool + foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap, foundCopy bool ) newOptions := make([]string, 0, len(options)) @@ -47,7 +47,7 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string if strings.HasPrefix(splitOpt[0], "idmap") { if foundIdmap { - return nil, errors.Wrapf(ErrDupeMntOption, "the 'idmap' option can only be set once") + return nil, fmt.Errorf("the 'idmap' option can only be set once: %w", ErrDupeMntOption) } foundIdmap = true newOptions = append(newOptions, opt) @@ -55,6 +55,11 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string } switch splitOpt[0] { + case "copy", "nocopy": + if foundCopy { + return nil, fmt.Errorf("only one of 'nocopy' and 'copy' can be used: %w", ErrDupeMntOption) + } + foundCopy = true case "O": foundOverlay = true case "volume-opt": @@ -62,51 +67,51 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string newOptions = append(newOptions, opt) case "exec", "noexec": if foundExec { - return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'noexec' and 'exec' can be used") + return nil, fmt.Errorf("only one of 'noexec' and 'exec' can be used: %w", ErrDupeMntOption) } foundExec = true case "suid", "nosuid": if foundSuid { - return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nosuid' and 'suid' can be used") + return nil, fmt.Errorf("only one of 'nosuid' and 'suid' can be used: %w", ErrDupeMntOption) } foundSuid = true case "nodev", "dev": if foundDev { - return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nodev' and 'dev' can be used") + return nil, fmt.Errorf("only one of 'nodev' and 'dev' can be used: %w", ErrDupeMntOption) } foundDev = true case "rw", "ro": if foundWrite { - return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rw' and 'ro' can be used") + return nil, fmt.Errorf("only one of 'rw' and 'ro' can be used: %w", ErrDupeMntOption) } foundWrite = true case "private", "rprivate", "slave", "rslave", "shared", "rshared", "unbindable", "runbindable": if foundProp { - return nil, errors.Wrapf(ErrDupeMntOption, "only one root propagation mode can be used") + return nil, fmt.Errorf("only one root propagation 
mode can be used: %w", ErrDupeMntOption) } foundProp = true case "size": if !isTmpfs { - return nil, errors.Wrapf(ErrBadMntOption, "the 'size' option is only allowed with tmpfs mounts") + return nil, fmt.Errorf("the 'size' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) } if foundSize { - return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs size can be specified") + return nil, fmt.Errorf("only one tmpfs size can be specified: %w", ErrDupeMntOption) } foundSize = true case "mode": if !isTmpfs { - return nil, errors.Wrapf(ErrBadMntOption, "the 'mode' option is only allowed with tmpfs mounts") + return nil, fmt.Errorf("the 'mode' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) } if foundMode { - return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs mode can be specified") + return nil, fmt.Errorf("only one tmpfs mode can be specified: %w", ErrDupeMntOption) } foundMode = true case "tmpcopyup": if !isTmpfs { - return nil, errors.Wrapf(ErrBadMntOption, "the 'tmpcopyup' option is only allowed with tmpfs mounts") + return nil, fmt.Errorf("the 'tmpcopyup' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) } if foundCopyUp { - return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once") + return nil, fmt.Errorf("the 'tmpcopyup' or 'notmpcopyup' option can only be set once: %w", ErrDupeMntOption) } foundCopyUp = true case "consistency": @@ -115,37 +120,37 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string continue case "notmpcopyup": if !isTmpfs { - return nil, errors.Wrapf(ErrBadMntOption, "the 'notmpcopyup' option is only allowed with tmpfs mounts") + return nil, fmt.Errorf("the 'notmpcopyup' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) } if foundCopyUp { - return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once") + return nil, fmt.Errorf("the 'tmpcopyup' or 'notmpcopyup' option can only be set once: %w", ErrDupeMntOption) } foundCopyUp = true // do not propagate notmpcopyup to the OCI runtime continue case "bind", "rbind": if isTmpfs { - return nil, errors.Wrapf(ErrBadMntOption, "the 'bind' and 'rbind' options are not allowed with tmpfs mounts") + return nil, fmt.Errorf("the 'bind' and 'rbind' options are not allowed with tmpfs mounts: %w", ErrBadMntOption) } if foundBind { - return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rbind' and 'bind' can be used") + return nil, fmt.Errorf("only one of 'rbind' and 'bind' can be used: %w", ErrDupeMntOption) } foundBind = true case "z", "Z": if isTmpfs { - return nil, errors.Wrapf(ErrBadMntOption, "the 'z' and 'Z' options are not allowed with tmpfs mounts") + return nil, fmt.Errorf("the 'z' and 'Z' options are not allowed with tmpfs mounts: %w", ErrBadMntOption) } if foundZ { - return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'z' and 'Z' can be used") + return nil, fmt.Errorf("only one of 'z' and 'Z' can be used: %w", ErrDupeMntOption) } foundZ = true case "U": if foundU { - return nil, errors.Wrapf(ErrDupeMntOption, "the 'U' option can only be set once") + return nil, fmt.Errorf("the 'U' option can only be set once: %w", ErrDupeMntOption) } foundU = true default: - return nil, errors.Wrapf(ErrBadMntOption, "unknown mount option %q", opt) + return nil, fmt.Errorf("unknown mount option %q: %w", opt, ErrBadMntOption) } newOptions = append(newOptions, opt) } @@ -182,11 +187,11 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath 
string) ([]string func ParseDriverOpts(option string) (string, string, error) { token := strings.SplitN(option, "=", 2) if len(token) != 2 { - return "", "", errors.Wrapf(ErrBadMntOption, "cannot parse driver opts") + return "", "", fmt.Errorf("cannot parse driver opts: %w", ErrBadMntOption) } opt := strings.SplitN(token[1], "=", 2) if len(opt) != 2 { - return "", "", errors.Wrapf(ErrBadMntOption, "cannot parse driver opts") + return "", "", fmt.Errorf("cannot parse driver opts: %w", ErrBadMntOption) } return opt[0], opt[1], nil } diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils.go b/vendor/github.com/containers/podman/v4/pkg/util/utils.go index 1b766333097..e7952a0bc3e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils.go @@ -1,7 +1,7 @@ package util import ( - "encoding/json" + "errors" "fmt" "io/fs" "math" @@ -19,15 +19,15 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/util" "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + enchelpers "github.com/containers/ocicrypt/helpers" "github.com/containers/podman/v4/pkg/errorhandling" "github.com/containers/podman/v4/pkg/namespaces" "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/podman/v4/pkg/signal" "github.com/containers/storage/pkg/idtools" stypes "github.com/containers/storage/types" - v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/term" ) @@ -68,7 +68,7 @@ func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) { fmt.Print("Password: ") termPassword, err := term.ReadPassword(0) if err != nil { - return nil, errors.Wrapf(err, "could not read password from terminal") + return nil, fmt.Errorf("could not read password from terminal: %w", err) } password = string(termPassword) } @@ -79,7 +79,7 @@ func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) { }, nil } -// StringInSlice is depracated, use containers/common/pkg/util/StringInSlice +// StringInSlice is deprecated, use containers/common/pkg/util/StringInSlice func StringInSlice(s string, sl []string) bool { return util.StringInSlice(s, sl) } @@ -95,236 +95,6 @@ func StringMatchRegexSlice(s string, re []string) bool { return false } -// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported -// by containers/image, but containing additional fields that are not supported -// by OCIv1 (but are by Docker v2) - notably OnBuild. -type ImageConfig struct { - v1.ImageConfig - OnBuild []string -} - -// GetImageConfig produces a v1.ImageConfig from the --change flag that is -// accepted by several Podman commands. It accepts a (limited subset) of -// Dockerfile instructions. 
-func GetImageConfig(changes []string) (ImageConfig, error) { - // Valid changes: - // USER - // EXPOSE - // ENV - // ENTRYPOINT - // CMD - // VOLUME - // WORKDIR - // LABEL - // STOPSIGNAL - // ONBUILD - - config := ImageConfig{} - - for _, change := range changes { - // First, let's assume proper Dockerfile format - space - // separator between instruction and value - split := strings.SplitN(change, " ", 2) - - if len(split) != 2 { - split = strings.SplitN(change, "=", 2) - if len(split) != 2 { - return ImageConfig{}, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change) - } - } - - outerKey := strings.ToUpper(strings.TrimSpace(split[0])) - value := strings.TrimSpace(split[1]) - switch outerKey { - case "USER": - // Assume literal contents are the user. - if value == "" { - return ImageConfig{}, errors.Errorf("invalid change %q - must provide a value to USER", change) - } - config.User = value - case "EXPOSE": - // EXPOSE is either [portnum] or - // [portnum]/[proto] - // Protocol must be "tcp" or "udp" - splitPort := strings.Split(value, "/") - if len(splitPort) > 2 { - return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change) - } - portNum, err := strconv.Atoi(splitPort[0]) - if err != nil { - return ImageConfig{}, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change) - } - if portNum > 65535 || portNum <= 0 { - return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change) - } - proto := "tcp" - if len(splitPort) > 1 { - testProto := strings.ToLower(splitPort[1]) - switch testProto { - case "tcp", "udp": - proto = testProto - default: - return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change) - } - } - if config.ExposedPorts == nil { - config.ExposedPorts = make(map[string]struct{}) - } - config.ExposedPorts[fmt.Sprintf("%d/%s", portNum, proto)] = struct{}{} - case "ENV": - // Format is either: - // ENV key=value - // ENV key=value key=value ... - // ENV key value - // Both keys and values can be surrounded by quotes to group them. - // For now: we only support key=value - // We will attempt to strip quotation marks if present. - - var ( - key, val string - ) - - splitEnv := strings.SplitN(value, "=", 2) - key = splitEnv[0] - // We do need a key - if key == "" { - return ImageConfig{}, errors.Errorf("invalid change %q - ENV must have at least one argument", change) - } - // Perfectly valid to not have a value - if len(splitEnv) == 2 { - val = splitEnv[1] - } - - if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) { - key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`) - } - if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) { - val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`) - } - config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val)) - case "ENTRYPOINT": - // Two valid forms. - // First, JSON array. - // Second, not a JSON array - we interpret this as an - // argument to `sh -c`, unless empty, in which case we - // just use a blank entrypoint. - testUnmarshal := []string{} - if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil { - // It ain't valid JSON, so assume it's an - // argument to sh -c if not empty. 
- if value != "" { - config.Entrypoint = []string{"/bin/sh", "-c", value} - } else { - config.Entrypoint = []string{} - } - } else { - // Valid JSON - config.Entrypoint = testUnmarshal - } - case "CMD": - // Same valid forms as entrypoint. - // However, where ENTRYPOINT assumes that 'ENTRYPOINT ' - // means no entrypoint, CMD assumes it is 'sh -c' with - // no third argument. - testUnmarshal := []string{} - if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil { - // It ain't valid JSON, so assume it's an - // argument to sh -c. - // Only include volume if it's not "" - config.Cmd = []string{"/bin/sh", "-c"} - if value != "" { - config.Cmd = append(config.Cmd, value) - } - } else { - // Valid JSON - config.Cmd = testUnmarshal - } - case "VOLUME": - // Either a JSON array or a set of space-separated - // paths. - // Acts rather similar to ENTRYPOINT and CMD, but always - // appends rather than replacing, and no sh -c prepend. - testUnmarshal := []string{} - if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil { - // Not valid JSON, so split on spaces - testUnmarshal = strings.Split(value, " ") - } - if len(testUnmarshal) == 0 { - return ImageConfig{}, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change) - } - for _, vol := range testUnmarshal { - if vol == "" { - return ImageConfig{}, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change) - } - if config.Volumes == nil { - config.Volumes = make(map[string]struct{}) - } - config.Volumes[vol] = struct{}{} - } - case "WORKDIR": - // This can be passed multiple times. - // Each successive invocation is treated as relative to - // the previous one - so WORKDIR /A, WORKDIR b, - // WORKDIR c results in /A/b/c - // Just need to check it's not empty... - if value == "" { - return ImageConfig{}, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change) - } - config.WorkingDir = filepath.Join(config.WorkingDir, value) - case "LABEL": - // Same general idea as ENV, but we no longer allow " " - // as a separator. - // We didn't do that for ENV either, so nice and easy. - // Potentially problematic: LABEL might theoretically - // allow an = in the key? If people really do this, we - // may need to investigate more advanced parsing. - var ( - key, val string - ) - - splitLabel := strings.SplitN(value, "=", 2) - // Unlike ENV, LABEL must have a value - if len(splitLabel) != 2 { - return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change) - } - key = splitLabel[0] - val = splitLabel[1] - - if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) { - key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`) - } - if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) { - val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`) - } - // Check key after we strip quotations - if key == "" { - return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change) - } - if config.Labels == nil { - config.Labels = make(map[string]string) - } - config.Labels[key] = val - case "STOPSIGNAL": - // Check the provided signal for validity. - killSignal, err := ParseSignal(value) - if err != nil { - return ImageConfig{}, errors.Wrapf(err, "invalid change %q - KILLSIGNAL must be given a valid signal", change) - } - config.StopSignal = fmt.Sprintf("%d", killSignal) - case "ONBUILD": - // Onbuild always appends. 
- if value == "" { - return ImageConfig{}, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change) - } - config.OnBuild = append(config.OnBuild, value) - default: - return ImageConfig{}, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey) - } - } - - return config, nil -} - // ParseSignal parses and validates a signal name or number. func ParseSignal(rawSignal string) (syscall.Signal, error) { // Strip off leading dash, to allow -1 or -HUP @@ -336,13 +106,13 @@ func ParseSignal(rawSignal string) (syscall.Signal, error) { } // 64 is SIGRTMAX; wish we could get this from a standard Go library if sig < 1 || sig > 64 { - return -1, errors.Errorf("valid signals are 1 through 64") + return -1, errors.New("valid signals are 1 through 64") } return sig, nil } // GetKeepIDMapping returns the mappings and the user to use when keep-id is used -func GetKeepIDMapping() (*stypes.IDMappingOptions, int, int, error) { +func GetKeepIDMapping(opts *namespaces.KeepIDUserNsOptions) (*stypes.IDMappingOptions, int, int, error) { if !rootless.IsRootless() { return nil, -1, -1, errors.New("keep-id is only supported in rootless mode") } @@ -359,14 +129,18 @@ func GetKeepIDMapping() (*stypes.IDMappingOptions, int, int, error) { uid := rootless.GetRootlessUID() gid := rootless.GetRootlessGID() + if opts.UID != nil { + uid = int(*opts.UID) + } + if opts.GID != nil { + gid = int(*opts.GID) + } - uids, gids, err := rootless.GetConfiguredMappings() + uids, gids, err := rootless.GetConfiguredMappings(true) if err != nil { - return nil, -1, -1, errors.Wrapf(err, "cannot read mappings") - } - if len(uids) == 0 || len(gids) == 0 { - return nil, -1, -1, errors.Wrapf(err, "keep-id requires additional UIDs or GIDs defined in /etc/subuid and /etc/subgid to function correctly") + return nil, -1, -1, fmt.Errorf("cannot read mappings: %w", err) } + maxUID, maxGID := 0, 0 for _, u := range uids { maxUID += u.Size @@ -377,13 +151,17 @@ func GetKeepIDMapping() (*stypes.IDMappingOptions, int, int, error) { options.UIDMap, options.GIDMap = nil, nil - options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)}) + if len(uids) > 0 { + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)}) + } options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1}) if maxUID > uid { options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid}) } - options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)}) + if len(gids) > 0 { + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)}) + } options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1}) if maxGID > gid { options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid}) @@ -401,12 +179,12 @@ func GetNoMapMapping() (*stypes.IDMappingOptions, int, int, error) { HostUIDMapping: false, HostGIDMapping: false, } - uids, gids, err := rootless.GetConfiguredMappings() + uids, gids, err := rootless.GetConfiguredMappings(false) if err != nil { - return nil, -1, -1, errors.Wrapf(err, "cannot read mappings") + return nil, -1, -1, fmt.Errorf("cannot read mappings: %w", err) } if len(uids) == 0 || len(gids) == 0 { - return nil, -1, -1, errors.Wrapf(err, "nomap requires additional UIDs or GIDs defined in 
/etc/subuid and /etc/subgid to function correctly") + return nil, -1, -1, fmt.Errorf("nomap requires additional UIDs or GIDs defined in /etc/subuid and /etc/subgid to function correctly: %w", err) } options.UIDMap, options.GIDMap = nil, nil uid, gid := 0, 0 @@ -566,7 +344,7 @@ func ParseInputTime(inputTime string, since bool) (time.Time, error) { // input might be a duration duration, err := time.ParseDuration(inputTime) if err != nil { - return time.Time{}, errors.Errorf("unable to interpret time value") + return time.Time{}, errors.New("unable to interpret time value") } if since { return time.Now().Add(-duration), nil @@ -607,7 +385,7 @@ func HomeDir() (string, error) { if home == "" { usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID())) if err != nil { - return "", errors.Wrapf(err, "unable to resolve HOME directory") + return "", fmt.Errorf("unable to resolve HOME directory: %w", err) } home = usr.HomeDir } @@ -645,12 +423,12 @@ func ValidateSysctls(strSlice []string) (map[string]string, error) { foundMatch := false arr := strings.Split(val, "=") if len(arr) < 2 { - return nil, errors.Errorf("%s is invalid, sysctl values must be in the form of KEY=VALUE", val) + return nil, fmt.Errorf("%s is invalid, sysctl values must be in the form of KEY=VALUE", val) } trimmed := fmt.Sprintf("%s=%s", strings.TrimSpace(arr[0]), strings.TrimSpace(arr[1])) if trimmed != val { - return nil, errors.Errorf("'%s' is invalid, extra spaces found", val) + return nil, fmt.Errorf("'%s' is invalid, extra spaces found", val) } if validSysctlMap[arr[0]] { @@ -666,7 +444,7 @@ func ValidateSysctls(strSlice []string) (map[string]string, error) { } } if !foundMatch { - return nil, errors.Errorf("sysctl '%s' is not allowed", arr[0]) + return nil, fmt.Errorf("sysctl '%s' is not allowed", arr[0]) } } return sysctl, nil @@ -676,18 +454,15 @@ func DefaultContainerConfig() *config.Config { return containerConfig } -func CreateCidFile(cidfile string, id string) error { - cidFile, err := OpenExclusiveFile(cidfile) +func CreateIDFile(path string, id string) error { + idFile, err := os.Create(path) if err != nil { - if os.IsExist(err) { - return errors.Errorf("container id file exists. 
Ensure another container is not using it or delete %s", cidfile) - } - return errors.Errorf("opening cidfile %s", cidfile) + return fmt.Errorf("creating idfile: %w", err) } - if _, err = cidFile.WriteString(id); err != nil { - logrus.Error(err) + defer idFile.Close() + if _, err = idFile.WriteString(id); err != nil { + return fmt.Errorf("writing idfile: %w", err) } - cidFile.Close() return nil } @@ -728,7 +503,7 @@ func IDtoolsToRuntimeSpec(idMaps []idtools.IDMap) (convertedIDMap []specs.LinuxI } func LookupUser(name string) (*user.User, error) { - // Assume UID look up first, if it fails lookup by username + // Assume UID lookup first, if it fails look up by username if u, err := user.LookupId(name); err == nil { return u, nil } @@ -751,3 +526,37 @@ func SizeOfPath(path string) (uint64, error) { }) return size, err } + +// EncryptConfig translates encryptionKeys into an EncryptConfig structure +func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.EncryptConfig, *[]int, error) { + var encLayers *[]int + var encConfig *encconfig.EncryptConfig + + if len(encryptionKeys) > 0 { + // encryption + encLayers = &encryptLayers + ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{}) + if err != nil { + return nil, nil, fmt.Errorf("invalid encryption keys: %w", err) + } + cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc}) + encConfig = cc.EncryptConfig + } + return encConfig, encLayers, nil +} + +// DecryptConfig translates decryptionKeys into a DecryptConfig structure +func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) { + var decryptConfig *encconfig.DecryptConfig + if len(decryptionKeys) > 0 { + // decryption + dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys) + if err != nil { + return nil, fmt.Errorf("invalid decryption keys: %w", err) + } + cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc}) + decryptConfig = cc.DecryptConfig + } + + return decryptConfig, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go index 66ae85e9cc9..3a2e587dfe0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go @@ -4,7 +4,7 @@ package util import ( - "github.com/pkg/errors" + "errors" ) func GetContainerPidInformationDescriptors() ([]string, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_freebsd.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_freebsd.go new file mode 100644 index 00000000000..621bb436ee1 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_freebsd.go @@ -0,0 +1,34 @@ +//go:build freebsd +// +build freebsd + +package util + +import ( + "github.com/opencontainers/runtime-tools/generate" +) + +func GetContainerPidInformationDescriptors() ([]string, error) { + // These are chosen to match the set of AIX format descriptors + // supported in Linux - FreeBSD ps does support (many) others. 
+ return []string{ + "args", + "comm", + "etime", + "group", + "nice", + "pcpu", + "pgid", + "pid", + "ppid", + "rgroup", + "ruser", + "time", + "tty", + "user", + "vsz", + }, nil +} + +func AddPrivilegedDevices(g *generate.Generator, systemdMode bool) error { + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go index 0b21bf3c537..c094beac75f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go @@ -1,15 +1,26 @@ package util import ( + "errors" "fmt" "io/fs" "os" "path/filepath" + "strconv" + "strings" "syscall" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/psgo" - "github.com/pkg/errors" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-tools/generate" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +var ( + errNotADevice = errors.New("not a device node") ) // GetContainerPidInformationDescriptors returns a string slice of all supported @@ -42,7 +53,7 @@ func FindDeviceNodes() (map[string]string, error) { // We are a device node. Get major/minor. sysstat, ok := info.Sys().(*syscall.Stat_t) if !ok { - return errors.Errorf("Could not convert stat output for use") + return errors.New("could not convert stat output for use") } // We must typeconvert sysstat.Rdev from uint64->int to avoid constant overflow rdev := int(sysstat.Rdev) @@ -59,3 +70,178 @@ func FindDeviceNodes() (map[string]string, error) { return nodes, nil } + +// isVirtualConsoleDevice returns true if path is a virtual console device +// (/dev/tty\d+). +// The passed path must be clean (filepath.Clean). +func isVirtualConsoleDevice(path string) bool { + /* + Virtual consoles are of the form `/dev/tty\d+`; any other device, such as + /dev/tty, ttyUSB0, or ttyACM0, should not be matched. + See `man 4 console` for more information. + */ + suffix := strings.TrimPrefix(path, "/dev/tty") + if suffix == path || suffix == "" { + return false + } + + // 16-bit is plenty: the maximum number of supported TTY devices is 512 as of Linux 6.1.5. + _, err := strconv.ParseUint(suffix, 10, 16) + return err == nil +} + +func AddPrivilegedDevices(g *generate.Generator, systemdMode bool) error { + hostDevices, err := getDevices("/dev") + if err != nil { + return err + } + g.ClearLinuxDevices() + + if rootless.IsRootless() { + mounts := make(map[string]interface{}) + for _, m := range g.Mounts() { + mounts[m.Destination] = true + } + newMounts := []spec.Mount{} + for _, d := range hostDevices { + devMnt := spec.Mount{ + Destination: d.Path, + Type: define.TypeBind, + Source: d.Path, + Options: []string{"slave", "nosuid", "noexec", "rw", "rbind"}, + } + + /* The following devices should not be mounted in rootless containers: + * + * /dev/ptmx: The host-provided /dev/ptmx should not be shared to + * the rootless containers for security reasons, and + * the container runtime will create it for us + * anyway (ln -s /dev/pts/ptmx /dev/ptmx); + * /dev/tty and + * /dev/tty[0-9]+: Prevent the container from taking over the host's + * virtual consoles, even when not in systemd mode + * for backwards compatibility.
+ */ + if d.Path == "/dev/ptmx" || d.Path == "/dev/tty" || isVirtualConsoleDevice(d.Path) { + continue + } + if _, found := mounts[d.Path]; found { + continue + } + newMounts = append(newMounts, devMnt) + } + g.Config.Mounts = append(newMounts, g.Config.Mounts...) + if g.Config.Linux.Resources != nil { + g.Config.Linux.Resources.Devices = nil + } + } else { + for _, d := range hostDevices { + /* Restrict access to the virtual consoles *only* when running + * in systemd mode to improve backwards compatibility. See + * https://github.com/containers/podman/issues/15878. + * + * NOTE: May need revisiting in the future to drop the systemd + * condition if more use cases end up breaking the virtual terminals + * of people who specifically disable the systemd mode. It would + * also provide a more consistent behaviour between rootless and + * rootfull containers. + */ + if systemdMode && isVirtualConsoleDevice(d.Path) { + continue + } + g.AddDevice(d) + } + // Add resources device - need to clear the existing one first. + if g.Config.Linux.Resources != nil { + g.Config.Linux.Resources.Devices = nil + } + g.AddLinuxResourcesDevice(true, "", nil, nil, "rwm") + } + + return nil +} + +// based on getDevices from runc (libcontainer/devices/devices.go) +func getDevices(path string) ([]spec.LinuxDevice, error) { + files, err := os.ReadDir(path) + if err != nil { + if rootless.IsRootless() && os.IsPermission(err) { + return nil, nil + } + return nil, err + } + out := []spec.LinuxDevice{} + for _, f := range files { + switch { + case f.IsDir(): + switch f.Name() { + // ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825 + case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts": + continue + default: + sub, err := getDevices(filepath.Join(path, f.Name())) + if err != nil { + return nil, err + } + if sub != nil { + out = append(out, sub...) 
+ } + continue + } + case f.Name() == "console": + continue + case f.Type()&os.ModeSymlink != 0: + continue + } + + device, err := DeviceFromPath(filepath.Join(path, f.Name())) + if err != nil { + if err == errNotADevice { + continue + } + if os.IsNotExist(err) { + continue + } + return nil, err + } + out = append(out, *device) + } + return out, nil +} + +// Copied from github.com/opencontainers/runc/libcontainer/devices +// Given the path to a device look up the information about a linux device +func DeviceFromPath(path string) (*spec.LinuxDevice, error) { + var stat unix.Stat_t + err := unix.Lstat(path, &stat) + if err != nil { + return nil, err + } + var ( + devType string + mode = stat.Mode + devNumber = uint64(stat.Rdev) //nolint: unconvert + m = os.FileMode(mode) + ) + + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + case mode&unix.S_IFIFO == unix.S_IFIFO: + devType = "p" + default: + return nil, errNotADevice + } + + return &spec.LinuxDevice{ + Type: devType, + Path: path, + FileMode: &m, + UID: &stat.Uid, + GID: &stat.Gid, + Major: int64(unix.Major(devNumber)), + Minor: int64(unix.Minor(devNumber)), + }, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go index 50e4b1b7b17..90a2ecf862e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go @@ -7,13 +7,13 @@ package util // should work to take darwin from this import ( + "errors" "fmt" "os" "path/filepath" "syscall" "github.com/containers/podman/v4/pkg/rootless" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -27,6 +27,12 @@ func GetRuntimeDir() (string, error) { rootlessRuntimeDirOnce.Do(func() { runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + + if runtimeDir != "" { + rootlessRuntimeDir, rootlessRuntimeDirError = filepath.EvalSymlinks(runtimeDir) + return + } + uid := fmt.Sprintf("%d", rootless.GetRootlessUID()) if runtimeDir == "" { tmpDir := filepath.Join("/run", "user", uid) @@ -51,12 +57,12 @@ func GetRuntimeDir() (string, error) { if runtimeDir == "" { home := os.Getenv("HOME") if home == "" { - rootlessRuntimeDirError = fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty") + rootlessRuntimeDirError = errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty") return } resolvedHome, err := filepath.EvalSymlinks(home) if err != nil { - rootlessRuntimeDirError = errors.Wrapf(err, "cannot resolve %s", home) + rootlessRuntimeDirError = fmt.Errorf("cannot resolve %s: %w", home, err) return } runtimeDir = filepath.Join(resolvedHome, "rundir") @@ -80,7 +86,7 @@ func GetRootlessConfigHomeDir() (string, error) { home := os.Getenv("HOME") resolvedHome, err := filepath.EvalSymlinks(home) if err != nil { - rootlessConfigHomeDirError = errors.Wrapf(err, "cannot resolve %s", home) + rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err) return } tmpDir := filepath.Join(resolvedHome, ".config") @@ -115,7 +121,7 @@ func GetRootlessPauseProcessPidPath() (string, error) { // files. 
func GetRootlessPauseProcessPidPathGivenDir(libpodTmpDir string) (string, error) { if libpodTmpDir == "" { - return "", errors.Errorf("must provide non-empty temporary directory") + return "", errors.New("must provide non-empty temporary directory") } return filepath.Join(libpodTmpDir, "pause.pid"), nil } diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go index 8963464933d..26fb7adf923 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go @@ -1,13 +1,11 @@ -//go:build darwin || windows -// +build darwin windows +//go:build darwin || windows || freebsd +// +build darwin windows freebsd package util -import ( - "github.com/pkg/errors" -) +import "errors" // FindDeviceNodes is not implemented anywhere except Linux. func FindDeviceNodes() (map[string]string, error) { - return nil, errors.Errorf("not supported on non-Linux OSes") + return nil, errors.New("not supported on non-Linux OSes") } diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go index b91680f7a4b..703e5472a6f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go @@ -4,35 +4,36 @@ package util import ( + "errors" + "fmt" "path/filepath" "github.com/containers/storage/pkg/homedir" - "github.com/pkg/errors" ) var errNotImplemented = errors.New("not yet implemented") // IsCgroup2UnifiedMode returns whether we are running in cgroup 2 unified mode. func IsCgroup2UnifiedMode() (bool, error) { - return false, errors.Wrap(errNotImplemented, "IsCgroup2Unified") + return false, fmt.Errorf("IsCgroup2Unified: %w", errNotImplemented) } // GetContainerPidInformationDescriptors returns a string slice of all supported // format descriptors of GetContainerPidInformation. func GetContainerPidInformationDescriptors() ([]string, error) { - return nil, errors.Wrap(errNotImplemented, "GetContainerPidInformationDescriptors") + return nil, fmt.Errorf("GetContainerPidInformationDescriptors: %w", errNotImplemented) } // GetRootlessPauseProcessPidPath returns the path to the file that holds the pid for // the pause process func GetRootlessPauseProcessPidPath() (string, error) { - return "", errors.Wrap(errNotImplemented, "GetRootlessPauseProcessPidPath") + return "", fmt.Errorf("GetRootlessPauseProcessPidPath: %w", errNotImplemented) } // GetRootlessPauseProcessPidPath returns the path to the file that holds the pid for // the pause process func GetRootlessPauseProcessPidPathGivenDir(unused string) (string, error) { - return "", errors.Wrap(errNotImplemented, "GetRootlessPauseProcessPidPath") + return "", fmt.Errorf("GetRootlessPauseProcessPidPath: %w", errNotImplemented) } // GetRuntimeDir returns the runtime directory diff --git a/vendor/github.com/containers/podman/v4/utils/ports.go b/vendor/github.com/containers/podman/v4/utils/ports.go deleted file mode 100644 index 57a6f82759e..00000000000 --- a/vendor/github.com/containers/podman/v4/utils/ports.go +++ /dev/null @@ -1,26 +0,0 @@ -package utils - -import ( - "net" - "strconv" - - "github.com/pkg/errors" -) - -// Find a random, open port on the host. 
-func GetRandomPort() (int, error) { - l, err := net.Listen("tcp", ":0") - if err != nil { - return 0, errors.Wrapf(err, "unable to get free TCP port") - } - defer l.Close() - _, randomPort, err := net.SplitHostPort(l.Addr().String()) - if err != nil { - return 0, errors.Wrapf(err, "unable to determine free port") - } - rp, err := strconv.Atoi(randomPort) - if err != nil { - return 0, errors.Wrapf(err, "unable to convert random port to int") - } - return rp, nil -} diff --git a/vendor/github.com/containers/podman/v4/utils/utils.go b/vendor/github.com/containers/podman/v4/utils/utils.go deleted file mode 100644 index fd66ac2ed37..00000000000 --- a/vendor/github.com/containers/podman/v4/utils/utils.go +++ /dev/null @@ -1,269 +0,0 @@ -package utils - -import ( - "bytes" - "crypto/rand" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "strconv" - "strings" - "sync" - - "github.com/containers/common/pkg/cgroups" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/storage/pkg/archive" - "github.com/godbus/dbus/v5" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ExecCmd executes a command with args and returns its output as a string along -// with an error, if any. -func ExecCmd(name string, args ...string) (string, error) { - cmd := exec.Command(name, args...) - var stdout bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err := cmd.Run() - if err != nil { - return "", fmt.Errorf("`%v %v` failed: %v %v (%v)", name, strings.Join(args, " "), stderr.String(), stdout.String(), err) - } - - return stdout.String(), nil -} - -// ExecCmdWithStdStreams execute a command with the specified standard streams. -func ExecCmdWithStdStreams(stdin io.Reader, stdout, stderr io.Writer, env []string, name string, args ...string) error { - cmd := exec.Command(name, args...) - cmd.Stdin = stdin - cmd.Stdout = stdout - cmd.Stderr = stderr - cmd.Env = env - - err := cmd.Run() - if err != nil { - return fmt.Errorf("`%v %v` failed: %v", name, strings.Join(args, " "), err) - } - - return nil -} - -// ErrDetach is an error indicating that the user manually detached from the -// container. -var ErrDetach = define.ErrDetach - -// CopyDetachable is similar to io.Copy but support a detach key sequence to break out. -func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) { - buf := make([]byte, 32*1024) - for { - nr, er := src.Read(buf) - if nr > 0 { - preservBuf := []byte{} - for i, key := range keys { - preservBuf = append(preservBuf, buf[0:nr]...) 
- if nr != 1 || buf[0] != key { - break - } - if i == len(keys)-1 { - return 0, ErrDetach - } - nr, er = src.Read(buf) - } - var nw int - var ew error - if len(preservBuf) > 0 { - nw, ew = dst.Write(preservBuf) - nr = len(preservBuf) - } else { - nw, ew = dst.Write(buf[0:nr]) - } - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er != nil { - if er != io.EOF { - err = er - } - break - } - } - return written, err -} - -// UntarToFileSystem untars an os.file of a tarball to a destination in the filesystem -func UntarToFileSystem(dest string, tarball *os.File, options *archive.TarOptions) error { - logrus.Debugf("untarring %s", tarball.Name()) - return archive.Untar(tarball, dest, options) -} - -// Creates a new tar file and wrties bytes from io.ReadCloser -func CreateTarFromSrc(source string, dest string) error { - file, err := os.Create(dest) - if err != nil { - return errors.Wrapf(err, "Could not create tarball file '%s'", dest) - } - defer file.Close() - return TarToFilesystem(source, file) -} - -// TarToFilesystem creates a tarball from source and writes to an os.file -// provided -func TarToFilesystem(source string, tarball *os.File) error { - tb, err := Tar(source) - if err != nil { - return err - } - _, err = io.Copy(tarball, tb) - if err != nil { - return err - } - logrus.Debugf("wrote tarball file %s", tarball.Name()) - return nil -} - -// Tar creates a tarball from source and returns a readcloser of it -func Tar(source string) (io.ReadCloser, error) { - logrus.Debugf("creating tarball of %s", source) - return archive.Tar(source, archive.Uncompressed) -} - -// RemoveScientificNotationFromFloat returns a float without any -// scientific notation if the number has any. -// golang does not handle conversion of float64s that have scientific -// notation in them and otherwise stinks. please replace this if you have -// a better implementation. -func RemoveScientificNotationFromFloat(x float64) (float64, error) { - bigNum := strconv.FormatFloat(x, 'g', -1, 64) - breakPoint := strings.IndexAny(bigNum, "Ee") - if breakPoint > 0 { - bigNum = bigNum[:breakPoint] - } - result, err := strconv.ParseFloat(bigNum, 64) - if err != nil { - return x, errors.Wrapf(err, "unable to remove scientific number from calculations") - } - return result, nil -} - -var ( - runsOnSystemdOnce sync.Once - runsOnSystemd bool -) - -// RunsOnSystemd returns whether the system is using systemd -func RunsOnSystemd() bool { - runsOnSystemdOnce.Do(func() { - initCommand, err := ioutil.ReadFile("/proc/1/comm") - // On errors, default to systemd - runsOnSystemd = err != nil || strings.TrimRight(string(initCommand), "\n") == "systemd" - }) - return runsOnSystemd -} - -func moveProcessPIDFileToScope(pidPath, slice, scope string) error { - data, err := ioutil.ReadFile(pidPath) - if err != nil { - // do not raise an error if the file doesn't exist - if os.IsNotExist(err) { - return nil - } - return errors.Wrapf(err, "cannot read pid file %s", pidPath) - } - pid, err := strconv.ParseUint(string(data), 10, 0) - if err != nil { - return errors.Wrapf(err, "cannot parse pid file %s", pidPath) - } - - return moveProcessToScope(int(pid), slice, scope) -} - -func moveProcessToScope(pid int, slice, scope string) error { - err := RunUnderSystemdScope(pid, slice, scope) - // If the PID is not valid anymore, do not return an error. 
- if dbusErr, ok := err.(dbus.Error); ok { - if dbusErr.Name == "org.freedesktop.DBus.Error.UnixProcessIdUnknown" { - return nil - } - } - return err -} - -// MoveRootlessNetnsSlirpProcessToUserSlice moves the slirp4netns process for the rootless netns -// into a different scope so that systemd does not kill it with a container. -func MoveRootlessNetnsSlirpProcessToUserSlice(pid int) error { - randBytes := make([]byte, 4) - _, err := rand.Read(randBytes) - if err != nil { - return err - } - return moveProcessToScope(pid, "user.slice", fmt.Sprintf("rootless-netns-%x.scope", randBytes)) -} - -// MovePauseProcessToScope moves the pause process used for rootless mode to keep the namespaces alive to -// a separate scope. -func MovePauseProcessToScope(pausePidPath string) { - var err error - - for i := 0; i < 10; i++ { - randBytes := make([]byte, 4) - _, err = rand.Read(randBytes) - if err != nil { - logrus.Errorf("failed to read random bytes: %v", err) - continue - } - err = moveProcessPIDFileToScope(pausePidPath, "user.slice", fmt.Sprintf("podman-pause-%x.scope", randBytes)) - if err == nil { - return - } - } - - if err != nil { - unified, err2 := cgroups.IsCgroup2UnifiedMode() - if err2 != nil { - logrus.Warnf("Failed to detect if running with cgroup unified: %v", err) - } - if RunsOnSystemd() && unified { - logrus.Warnf("Failed to add pause process to systemd sandbox cgroup: %v", err) - } else { - logrus.Debugf("Failed to add pause process to systemd sandbox cgroup: %v", err) - } - } -} - -// CreateSCPCommand takes an existing command, appends the given arguments and returns a configured podman command for image scp -func CreateSCPCommand(cmd *exec.Cmd, command []string) *exec.Cmd { - cmd.Args = append(cmd.Args, command...) - cmd.Env = os.Environ() - cmd.Stderr = os.Stderr - cmd.Stdout = os.Stdout - return cmd -} - -// LoginUser starts the user process on the host so that image scp can use systemd-run -func LoginUser(user string) (*exec.Cmd, error) { - sleep, err := exec.LookPath("sleep") - if err != nil { - return nil, err - } - machinectl, err := exec.LookPath("machinectl") - if err != nil { - return nil, err - } - cmd := exec.Command(machinectl, "shell", "-q", user+"@.host", sleep, "inf") - err = cmd.Start() - return cmd, err -} diff --git a/vendor/github.com/containers/podman/v4/utils/utils_supported.go b/vendor/github.com/containers/podman/v4/utils/utils_supported.go deleted file mode 100644 index c2dcc463152..00000000000 --- a/vendor/github.com/containers/podman/v4/utils/utils_supported.go +++ /dev/null @@ -1,207 +0,0 @@ -//go:build linux || darwin -// +build linux darwin - -package utils - -import ( - "bufio" - "bytes" - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/containers/common/pkg/cgroups" - "github.com/containers/podman/v4/pkg/rootless" - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// RunUnderSystemdScope adds the specified pid to a systemd scope -func RunUnderSystemdScope(pid int, slice string, unitName string) error { - var properties []systemdDbus.Property - var conn *systemdDbus.Conn - var err error - - if rootless.IsRootless() { - conn, err = cgroups.GetUserConnection(rootless.GetRootlessUID()) - if err != nil { - return err - } - } else { - conn, err = systemdDbus.NewWithContext(context.Background()) - if err != nil { - return err - } - } - defer conn.Close() - properties = append(properties, systemdDbus.PropSlice(slice)) - 
properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) - properties = append(properties, newProp("Delegate", true)) - properties = append(properties, newProp("DefaultDependencies", false)) - ch := make(chan string) - _, err = conn.StartTransientUnitContext(context.Background(), unitName, "replace", properties, ch) - if err != nil { - // On errors check if the cgroup already exists, if it does move the process there - if props, err := conn.GetUnitTypePropertiesContext(context.Background(), unitName, "Scope"); err == nil { - if cgroup, ok := props["ControlGroup"].(string); ok && cgroup != "" { - if err := moveUnderCgroup(cgroup, "", []uint32{uint32(pid)}); err == nil { - return nil - } - // On errors return the original error message we got from StartTransientUnit. - } - } - return err - } - - // Block until job is started - <-ch - - return nil -} - -func getCgroupProcess(procFile string, allowRoot bool) (string, error) { - f, err := os.Open(procFile) - if err != nil { - return "", err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - cgroup := "" - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, ":", 3) - if len(parts) != 3 { - return "", errors.Errorf("cannot parse cgroup line %q", line) - } - if strings.HasPrefix(line, "0::") { - cgroup = line[3:] - break - } - if len(parts[2]) > len(cgroup) { - cgroup = parts[2] - } - } - if len(cgroup) == 0 || (!allowRoot && cgroup == "/") { - return "", errors.Errorf("could not find cgroup mount in %q", procFile) - } - return cgroup, nil -} - -// GetOwnCgroup returns the cgroup for the current process. -func GetOwnCgroup() (string, error) { - return getCgroupProcess("/proc/self/cgroup", true) -} - -func GetOwnCgroupDisallowRoot() (string, error) { - return getCgroupProcess("/proc/self/cgroup", false) -} - -// GetCgroupProcess returns the cgroup for the specified process process. -func GetCgroupProcess(pid int) (string, error) { - return getCgroupProcess(fmt.Sprintf("/proc/%d/cgroup", pid), true) -} - -// MoveUnderCgroupSubtree moves the PID under a cgroup subtree. -func MoveUnderCgroupSubtree(subtree string) error { - return moveUnderCgroup("", subtree, nil) -} - -// moveUnderCgroup moves a group of processes to a new cgroup. -// If cgroup is the empty string, then the current calling process cgroup is used. -// If processes is empty, then the processes from the current cgroup are moved. -func moveUnderCgroup(cgroup, subtree string, processes []uint32) error { - procFile := "/proc/self/cgroup" - f, err := os.Open(procFile) - if err != nil { - return err - } - defer f.Close() - - unifiedMode, err := cgroups.IsCgroup2UnifiedMode() - if err != nil { - return err - } - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, ":", 3) - if len(parts) != 3 { - return errors.Errorf("cannot parse cgroup line %q", line) - } - - // root cgroup, skip it - if parts[2] == "/" { - continue - } - - cgroupRoot := "/sys/fs/cgroup" - // Special case the unified mount on hybrid cgroup and named hierarchies. - // This works on Fedora 31, but we should really parse the mounts to see - // where the cgroup hierarchy is mounted. 
- if parts[1] == "" && !unifiedMode { - // If it is not using unified mode, the cgroup v2 hierarchy is - // usually mounted under /sys/fs/cgroup/unified - cgroupRoot = filepath.Join(cgroupRoot, "unified") - - // Ignore the unified mount if it doesn't exist - if _, err := os.Stat(cgroupRoot); err != nil && os.IsNotExist(err) { - continue - } - } else if parts[1] != "" { - // Assume the controller is mounted at /sys/fs/cgroup/$CONTROLLER. - controller := strings.TrimPrefix(parts[1], "name=") - cgroupRoot = filepath.Join(cgroupRoot, controller) - } - - parentCgroup := cgroup - if parentCgroup == "" { - parentCgroup = parts[2] - } - newCgroup := filepath.Join(cgroupRoot, parentCgroup, subtree) - if err := os.MkdirAll(newCgroup, 0755); err != nil && !os.IsExist(err) { - return err - } - - f, err := os.OpenFile(filepath.Join(newCgroup, "cgroup.procs"), os.O_RDWR, 0755) - if err != nil { - return err - } - defer f.Close() - - if len(processes) > 0 { - for _, pid := range processes { - if _, err := f.Write([]byte(fmt.Sprintf("%d\n", pid))); err != nil { - logrus.Debugf("Cannot move process %d to cgroup %q: %v", pid, newCgroup, err) - } - } - } else { - processesData, err := ioutil.ReadFile(filepath.Join(cgroupRoot, parts[2], "cgroup.procs")) - if err != nil { - return err - } - for _, pid := range bytes.Split(processesData, []byte("\n")) { - if len(pid) == 0 { - continue - } - if _, err := f.Write(pid); err != nil { - logrus.Debugf("Cannot move process %s to cgroup %q: %v", string(pid), newCgroup, err) - } - } - } - } - return nil -} - -func newProp(name string, units interface{}) systemdDbus.Property { - return systemdDbus.Property{ - Name: name, - Value: dbus.MakeVariant(units), - } -} diff --git a/vendor/github.com/containers/podman/v4/utils/utils_windows.go b/vendor/github.com/containers/podman/v4/utils/utils_windows.go deleted file mode 100644 index 1d017f5ae7e..00000000000 --- a/vendor/github.com/containers/podman/v4/utils/utils_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build windows -// +build windows - -package utils - -import "github.com/pkg/errors" - -func RunUnderSystemdScope(pid int, slice string, unitName string) error { - return errors.New("not implemented for windows") -} - -func MoveUnderCgroupSubtree(subtree string) error { - return errors.New("not implemented for windows") -} - -func GetOwnCgroup() (string, error) { - return "", errors.New("not implemented for windows") -} - -func GetOwnCgroupDisallowRoot() (string, error) { - return "", errors.New("not implemented for windows") -} - -func GetCgroupProcess(pid int) (string, error) { - return "", errors.New("not implemented for windows") -} diff --git a/vendor/github.com/containers/podman/v4/version/version.go b/vendor/github.com/containers/podman/v4/version/version.go index 32f1ee0e762..4513dc3bc37 100644 --- a/vendor/github.com/containers/podman/v4/version/version.go +++ b/vendor/github.com/containers/podman/v4/version/version.go @@ -1,7 +1,7 @@ package version import ( - "github.com/blang/semver" + "github.com/blang/semver/v4" ) type ( @@ -27,21 +27,22 @@ const ( // NOTE: remember to bump the version at the top // of the top-level README.md file when this is // bumped. 
-var Version = semver.MustParse("4.1.1") +var Version = semver.MustParse("4.4.2") // See https://docs.docker.com/engine/api/v1.40/ // libpod compat handlers are expected to honor docker API versions // APIVersion provides the current and minimal API versions for compat and libpod endpoint trees // Note: GET|HEAD /_ping is never versioned and provides the API-Version and Libpod-API-Version headers to allow -// clients to shop for the Version they wish to support +// +// clients to shop for the Version they wish to support var APIVersion = map[Tree]map[Level]semver.Version{ Libpod: { CurrentAPI: Version, MinimalAPI: semver.MustParse("4.0.0"), }, Compat: { - CurrentAPI: semver.MustParse("1.40.0"), + CurrentAPI: semver.MustParse("1.41.0"), MinimalAPI: semver.MustParse("1.24.0"), }, } diff --git a/vendor/github.com/containers/psgo/README.md b/vendor/github.com/containers/psgo/README.md index 684c80a0c08..e54423ca78a 100644 --- a/vendor/github.com/containers/psgo/README.md +++ b/vendor/github.com/containers/psgo/README.md @@ -83,6 +83,8 @@ The ps library is compatible with all AIX format descriptors of the ps command-l - The corresponding host PID of a container process. - **huser** - The corresponding effective user of a container process on the host. +- **huid** + - The corresponding host UID of a container process. - **label** - Current security attributes of the process. - **seccomp** diff --git a/vendor/github.com/containers/psgo/internal/capabilities/capabilities.go b/vendor/github.com/containers/psgo/internal/capabilities/capabilities.go index 1a60b96c418..9545ed57b3a 100644 --- a/vendor/github.com/containers/psgo/internal/capabilities/capabilities.go +++ b/vendor/github.com/containers/psgo/internal/capabilities/capabilities.go @@ -63,11 +63,14 @@ var ( 35: "WAKE_ALARM", 36: "BLOCK_SUSPEND", 37: "AUDIT_READ", + 38: "PERFMON", + 39: "BPF", + 40: "CHECKPOINT_RESTORE", } // FullCAPs represents the value of a bitmask with a full capability // set. - FullCAPs = uint64(0x3FFFFFFFFF) + FullCAPs = uint64(0x1FFFFFFFFFF) ) // TranslateMask iterates over mask and returns a slice of corresponding diff --git a/vendor/github.com/containers/psgo/internal/proc/stat.go b/vendor/github.com/containers/psgo/internal/proc/stat.go index e3286704cc4..5e0bafb2b53 100644 --- a/vendor/github.com/containers/psgo/internal/proc/stat.go +++ b/vendor/github.com/containers/psgo/internal/proc/stat.go @@ -32,7 +32,7 @@ type Stat struct { // whether or not the executable is swapped out. Comm string // (3) The process state (e.g., running, sleeping, zombie, dead). - // Refer to proc(5) for further deatils. + // Refer to proc(5) for further details. State string // (4) The PID of the parent of this process. Ppid string diff --git a/vendor/github.com/containers/psgo/psgo.go b/vendor/github.com/containers/psgo/psgo.go index d6cfcef4dc0..5c2c4d0b0b6 100644 --- a/vendor/github.com/containers/psgo/psgo.go +++ b/vendor/github.com/containers/psgo/psgo.go @@ -181,6 +181,11 @@ var ( header: "USER", procFn: processUSER, }, + { + normal: "uid", + header: "UID", + procFn: processUID, + }, { code: "%a", normal: "args", @@ -294,6 +299,12 @@ var ( onHost: true, procFn: processHUSER, }, + { + normal: "huid", + header: "HUID", + onHost: true, + procFn: processHUID, + }, { normal: "hgroup", header: "HGROUP", @@ -648,6 +659,11 @@ func processUSER(p *process.Process, ctx *psContext) (string, error) { return process.LookupUID(p.Status.Uids[1]) } +// processUID returns the effective UID of the process as the decimal representation. 
+func processUID(p *process.Process, ctx *psContext) (string, error) { + return p.Status.Uids[1], nil +} + // processRUSER returns the effective user name of the process. This will be // the textual user ID, if it can be obtained, or a decimal representation // otherwise. @@ -857,6 +873,23 @@ func processHUSER(p *process.Process, ctx *psContext) (string, error) { return "?", nil } +// processHUID returns the effective UID of the corresponding host process +// of the (container) as the decimal representation or "?" if no corresponding +// process could be found. +func processHUID(p *process.Process, ctx *psContext) (string, error) { + if hp := findHostProcess(p, ctx); hp != nil { + if ctx.opts != nil && len(ctx.opts.UIDMap) > 0 { + // Return uid without searching its textual representation. + lookupFunc := func(uid string) (string, error) { + return uid, nil + } + return findID(hp.Status.Uids[1], ctx.opts.UIDMap, lookupFunc, "/proc/sys/fs/overflowuid") + } + return hp.Status.Uids[1], nil + } + return "?", nil +} + // processHGROUP returns the effective group ID of the corresponding host // process of the (container) or "?" if no corresponding process could be // found. diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 53b13cd339c..6f9048564c9 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -18,7 +18,6 @@ env: #### Cache-image names to test with (double-quotes around names are critical) ### FEDORA_NAME: "fedora-36" - PRIOR_FEDORA_NAME: "fedora-35" UBUNTU_NAME: "ubuntu-2204" # GCE project where images live @@ -26,7 +25,6 @@ env: # VM Image built in containers/automation_images IMAGE_SUFFIX: "c5878804328480768" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" #### @@ -74,6 +72,8 @@ fedora_testing_task: &fedora_testing TEST_DRIVER: "vfs" - env: TEST_DRIVER: "overlay" + - env: + TEST_DRIVER: "overlay-transient" - env: TEST_DRIVER: "fuse-overlay" - env: @@ -90,15 +90,6 @@ fedora_testing_task: &fedora_testing journal_log_script: '${_JOURNALCMD} || true' -prior_fedora_testing_task: - <<: *fedora_testing - alias: prior_fedora_testing - name: *std_test_name - env: - OS_NAME: "${PRIOR_FEDORA_NAME}" - VM_IMAGE: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - - ubuntu_testing_task: &ubuntu_testing <<: *fedora_testing alias: ubuntu_testing @@ -117,7 +108,7 @@ lint_task: env: CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" container: - image: golang:1.16 + image: golang:1.17 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -125,14 +116,14 @@ lint_task: echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list apt-get update apt-get install -y libbtrfs-dev libdevmapper-dev - test_script: make lint + test_script: make TAGS=regex_precompile local-validate && make lint && make clean # Update metadata on VM images referenced by this repository state meta_task: container: - image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}" + image: "quay.io/libpod/imgts:latest" cpu: 1 memory: 1 @@ -140,7 +131,6 @@ meta_task: # Space-separated list of images used by this repository state IMGNAMES: |- ${FEDORA_CACHE_IMAGE_NAME} - ${PRIOR_FEDORA_CACHE_IMAGE_NAME} ${UBUNTU_CACHE_IMAGE_NAME} BUILDID: "${CIRRUS_BUILD_ID}" REPOREF: "${CIRRUS_CHANGE_IN_REPO}" @@ -154,7 +144,7 @@ meta_task: vendor_task: container: - 
image: golang:1.16 + image: golang:1.17 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -167,11 +157,10 @@ success_task: depends_on: - lint - fedora_testing - - prior_fedora_testing - ubuntu_testing - meta - vendor container: - image: golang:1.16 + image: golang:1.17 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 244576d546a..ea2bb6406e7 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -60,7 +60,7 @@ local-gccgo: ## build using gccgo on the host GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage local-cross: ## cross build the binaries for arm, darwin, and freebsd - @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ + @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ os=`echo $${target} | cut -f1 -d/` ; \ arch=`echo $${target} | cut -f2 -d/` ; \ suffix=$${os}.$${arch} ; \ @@ -92,7 +92,7 @@ local-test-integration: local-binary ## run the integration tests on the host (r test-integration: local-binary ## run the integration tests using VMs $(RUNINVM) $(MAKE) local-$@ -local-validate: ## validate DCO and gofmt on the host +local-validate: install.tools ## validate DCO and gofmt on the host @./hack/git-validation.sh @./hack/gofmt.sh @@ -117,9 +117,9 @@ help: ## this help @awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) vendor-in-container: - podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang:1.17 make vendor vendor: - $(GO) mod tidy + $(GO) mod tidy -compat=1.17 $(GO) mod vendor $(GO) mod verify diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index a50908ca3da..53999456ea9 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.42.0 +1.45.3 diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index 81c9894c5d0..106d1d152e0 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -3,7 +3,6 @@ package storage import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "sync" @@ -11,11 +10,28 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" digest "github.com/opencontainers/go-digest" ) +type containerLocations uint8 + +// The backing store is split in two json files, one (the volatile) +// that is written without fsync() meaning it isn't as robust to +// unclean shutdown +const ( + stableContainerLocation containerLocations = 1 << iota + volatileContainerLocation + + numContainerLocationIndex = iota +) + +func 
containerLocationFromIndex(index int) containerLocations { + return 1 << index +} + // A Container is a reference to a read-write layer with metadata. type Container struct { // ID is either one which was specified at create-time, or a random @@ -65,14 +81,30 @@ type Container struct { GIDMap []idtools.IDMap `json:"gidmap,omitempty"` Flags map[string]interface{} `json:"flags,omitempty"` + + // volatileStore is true if the container is from the volatile json file + volatileStore bool `json:"-"` } -// ContainerStore provides bookkeeping for information about Containers. -type ContainerStore interface { - FileBasedStore - MetadataStore - ContainerBigDataStore - FlaggableStore +// rwContainerStore provides bookkeeping for information about Containers. +type rwContainerStore interface { + metadataStore + containerBigDataStore + flaggableStore + + // startWriting makes sure the store is fresh, and locks it for writing. + // If this succeeds, the caller MUST call stopWriting(). + startWriting() error + + // stopWriting releases locks obtained by startWriting. + stopWriting() + + // startReading makes sure the store is fresh, and locks it for reading. + // If this succeeds, the caller MUST call stopReading(). + startReading() error + + // stopReading releases locks obtained by startReading. + stopReading() // Create creates a container that has a specified ID (or generates a // random one if an empty value is supplied) and optional names, @@ -82,18 +114,8 @@ type ContainerStore interface { // convenience of the caller, nothing more. Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) - // SetNames updates the list of names associated with the container - // with the specified ID. - // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. - SetNames(id string, names []string) error - - // AddNames adds the supplied values to the list of names associated with the container with - // the specified id. - AddNames(id string, names []string) error - - // RemoveNames removes the supplied values from the list of names associated with the container with - // the specified id. - RemoveNames(id string, names []string) error + // updateNames modifies names associated with a container based on (op, names). + updateNames(id string, names []string, op updateNameOperation) error // Get retrieves information about a container given an ID or name. Get(id string) (*Container, error) @@ -113,17 +135,27 @@ type ContainerStore interface { // Containers returns a slice enumerating the known containers. Containers() ([]Container, error) + + // Clean up unreferenced datadirs + GarbageCollect() error } type containerStore struct { - lockfile Locker - dir string + // The following fields are only set when constructing containerStore, and must never be modified afterwards. + // They are safe to access without any other locking. + lockfile *lockfile.LockFile // Synchronizes readers vs. writers of the _filesystem data_, both cross-process and in-process. + dir string + jsonPath [numContainerLocationIndex]string + + inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held. + // The following fields can only be read/written with read/write ownership of inProcessLock, respectively. + // Almost all users should use startReading() or startWriting(). 
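+	//
+	// A typical reader, sketched for illustration (not upstream code):
+	//
+	//	if err := r.startReading(); err != nil { return err }
+	//	defer r.stopReading()
+	//	container, ok := r.byid[id]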
+ lastWrite lockfile.LastWrite containers []*Container idindex *truncindex.TruncIndex byid map[string]*Container bylayer map[string]*Container byname map[string]*Container - loadMut sync.Mutex } func copyContainer(c *Container) *Container { @@ -140,30 +172,31 @@ func copyContainer(c *Container) *Container { UIDMap: copyIDMap(c.UIDMap), GIDMap: copyIDMap(c.GIDMap), Flags: copyStringInterfaceMap(c.Flags), + volatileStore: c.volatileStore, } } func (c *Container) MountLabel() string { - if label, ok := c.Flags["MountLabel"].(string); ok { + if label, ok := c.Flags[mountLabelFlag].(string); ok { return label } return "" } func (c *Container) ProcessLabel() string { - if label, ok := c.Flags["ProcessLabel"].(string); ok { + if label, ok := c.Flags[processLabelFlag].(string); ok { return label } return "" } func (c *Container) MountOpts() []string { - switch c.Flags["MountOpts"].(type) { + switch value := c.Flags[mountOptsFlag].(type) { case []string: - return c.Flags["MountOpts"].([]string) + return value case []interface{}: var mountOpts []string - for _, v := range c.Flags["MountOpts"].([]interface{}) { + for _, v := range value { if flag, ok := v.(string); ok { mountOpts = append(mountOpts, flag) } @@ -174,6 +207,187 @@ func (c *Container) MountOpts() []string { } } +// The caller must hold r.inProcessLock for reading. +func containerLocation(c *Container) containerLocations { + if c.volatileStore { + return volatileContainerLocation + } + return stableContainerLocation +} + +// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +// +// This is an internal implementation detail of containerStore construction, every other caller +// should use startWriting() instead. +func (r *containerStore) startWritingWithReload(canReload bool) error { + r.lockfile.Lock() + r.inProcessLock.Lock() + succeeded := false + defer func() { + if !succeeded { + r.inProcessLock.Unlock() + r.lockfile.Unlock() + } + }() + + if canReload { + if _, err := r.reloadIfChanged(true); err != nil { + return err + } + } + + succeeded = true + return nil +} + +// startWriting makes sure the store is fresh, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +func (r *containerStore) startWriting() error { + return r.startWritingWithReload(true) +} + +// stopWriting releases locks obtained by startWriting. +func (r *containerStore) stopWriting() { + r.inProcessLock.Unlock() + r.lockfile.Unlock() +} + +// startReading makes sure the store is fresh, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). +func (r *containerStore) startReading() error { + // inProcessLocked calls the nested function with r.inProcessLock held for writing. + inProcessLocked := func(fn func() error) error { + r.inProcessLock.Lock() + defer r.inProcessLock.Unlock() + return fn() + } + + r.lockfile.RLock() + unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil. + defer func() { + if unlockFn != nil { + unlockFn() + } + }() + r.inProcessLock.RLock() + unlockFn = r.stopReading + + // If we are lucky, we can just hold the read locks, check that we are fresh, and continue. + _, modified, err := r.modified() + if err != nil { + return err + } + if modified { + // We are unlucky, and need to reload. + // NOTE: Multiple goroutines can get to this place approximately simultaneously. 
+ r.inProcessLock.RUnlock() + unlockFn = r.lockfile.Unlock + + // r.lastWrite can change at this point if another goroutine reloads the store before us. That’s why we don’t unconditionally + // trigger a load below; we (lock and) reloadIfChanged() again. + + // First try reloading with r.lockfile held for reading. + // r.inProcessLock will serialize all goroutines that got here; + // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data. + var tryLockedForWriting bool + if err := inProcessLocked(func() error { + // We could optimize this further: The r.lockfile.GetLastWrite() value shouldn’t change as long as we hold r.lockfile, + // so if r.lastWrite was already updated, we don’t need to actually read the on-filesystem lock. + var err error + tryLockedForWriting, err = r.reloadIfChanged(false) + return err + }); err != nil { + if !tryLockedForWriting { + return err + } + // Not good enough, we need r.lockfile held for writing. So, let’s do that. + unlockFn() + unlockFn = nil + + r.lockfile.Lock() + unlockFn = r.lockfile.Unlock + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(true) + return err + }); err != nil { + return err + } + unlockFn() + unlockFn = nil + + r.lockfile.RLock() + unlockFn = r.lockfile.Unlock + // We need to check for a reload once more because the on-disk state could have been modified + // after we released the lock. + // If that, _again_, finds inconsistent state, just give up. + // We could, plausibly, retry a few times, but that inconsistent state (duplicate container names) + // shouldn’t be saved (by correct implementations) in the first place. + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(false) + return err + }); err != nil { + return fmt.Errorf("(even after successfully cleaning up once:) %w", err) + } + } + + // NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because + // the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock + // protects us against in-process writers modifying data. + // In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date + // and 2) access to the in-memory data is not racy; + // but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state. + + r.inProcessLock.RLock() + } + unlockFn = nil + return nil +} + +// stopReading releases locks obtained by startReading. +func (r *containerStore) stopReading() { + r.inProcessLock.RUnlock() + r.lockfile.Unlock() +} + +// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store), +// and a lockfile.LastWrite value for that update. +// +// The caller must hold r.lockfile for reading _or_ writing. +// The caller must hold r.inProcessLock for reading or writing. +func (r *containerStore) modified() (lockfile.LastWrite, bool, error) { + return r.lockfile.ModifiedSince(r.lastWrite) +} + +// reloadIfChanged reloads the contents of the store from disk if it is changed. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// reloadIfChanged() with lockedForWriting could succeed. 
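+//
+// The retry pattern this contract enables, sketched for illustration:
+//
+//	tryWriting, err := r.reloadIfChanged(false) // r.lockfile held for reading
+//	if err != nil && tryWriting {
+//		// retake r.lockfile for writing, then call reloadIfChanged(true)
+//	}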
+func (r *containerStore) reloadIfChanged(lockedForWriting bool) (bool, error) { + lastWrite, modified, err := r.modified() + if err != nil { + return false, err + } + // We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load() + // and modify no fields, to ensure they see fresh data: + // r.lockfile.Modified() only returns true once per change. Without an exclusive lock, + // one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could + // see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale. + if modified { + if tryLockedForWriting, err := r.load(lockedForWriting); err != nil { + return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again. + } + r.lastWrite = lastWrite + } + return false, nil +} + +// Requires startReading or startWriting. func (r *containerStore) Containers() ([]Container, error) { containers := make([]Container, len(r.containers)) for i := range r.containers { @@ -182,8 +396,38 @@ func (r *containerStore) Containers() ([]Container, error) { return containers, nil } -func (r *containerStore) containerspath() string { - return filepath.Join(r.dir, "containers.json") +// GarbageCollect looks for datadirs in the store directory that are not referenced +// by the json file and removes them. These can happen in the case of unclean +// shutdowns or regular restarts in transient store mode. +// Requires startReading. +func (r *containerStore) GarbageCollect() error { + entries, err := os.ReadDir(r.dir) + if err != nil { + // Unexpected, don't try any GC + return err + } + + for _, entry := range entries { + id := entry.Name() + // Does it look like a datadir directory? + if !entry.IsDir() || !nameLooksLikeID(id) { + continue + } + + // Should the id be there? + if r.byid[id] != nil { + continue + } + + // Otherwise remove datadir + moreErr := os.RemoveAll(filepath.Join(r.dir, id)) + // Propagate first error + if moreErr != nil && err == nil { + err = moreErr + } + } + + return err } func (r *containerStore) datadir(id string) string { @@ -194,84 +438,178 @@ func (r *containerStore) datapath(id, key string) string { return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) } -func (r *containerStore) Load() error { - needSave := false - rpath := r.containerspath() - data, err := ioutil.ReadFile(rpath) - if err != nil && !os.IsNotExist(err) { - return err - } +// load reloads the contents of the store from disk. +// +// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly +// manage r.lastWrite. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// retrying with lockedForWriting could succeed.
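+//
+// load merges both locations, containers.json (stable) and
+// volatile-containers.json (written without fsync; see save()), and marks
+// entries read from the volatile file so saveFor() routes them back there.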
+func (r *containerStore) load(lockedForWriting bool) (bool, error) { + var modifiedLocations containerLocations containers := []*Container{} - layers := make(map[string]*Container) - idlist := []string{} + ids := make(map[string]*Container) + + for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ { + location := containerLocationFromIndex(locationIndex) + rpath := r.jsonPath[locationIndex] + + data, err := os.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return false, err + } + + locationContainers := []*Container{} + if len(data) != 0 { + if err := json.Unmarshal(data, &locationContainers); err != nil { + return false, fmt.Errorf("loading %q: %w", rpath, err) + } + } + + for _, container := range locationContainers { + // There should be no duplicated ids between json files, but lets check to be sure + if ids[container.ID] != nil { + continue // skip invalid duplicated container + } + // Remember where the container came from + if location == volatileContainerLocation { + container.volatileStore = true + } + containers = append(containers, container) + ids[container.ID] = container + } + } + + idlist := make([]string, 0, len(containers)) + layers := make(map[string]*Container) names := make(map[string]*Container) - if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { - idlist = make([]string, 0, len(containers)) - for n, container := range containers { - idlist = append(idlist, container.ID) - ids[container.ID] = containers[n] - layers[container.LayerID] = containers[n] - for _, name := range container.Names { - if conflict, ok := names[name]; ok { - r.removeName(conflict, name) - needSave = true - } - names[name] = containers[n] + var errorToResolveBySaving error // == nil + for n, container := range containers { + idlist = append(idlist, container.ID) + layers[container.LayerID] = containers[n] + for _, name := range container.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock") + modifiedLocations |= containerLocation(container) } + names[name] = containers[n] } } + r.containers = containers - r.idindex = truncindex.NewTruncIndex(idlist) + r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store. r.byid = ids r.bylayer = layers r.byname = names - if needSave { - return r.Save() + if errorToResolveBySaving != nil { + if !lockedForWriting { + return true, errorToResolveBySaving + } + return false, r.save(modifiedLocations) } - return nil + return false, nil } -func (r *containerStore) Save() error { - if !r.Locked() { - return errors.New("container store is not locked") - } - rpath := r.containerspath() - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { - return err +// save saves the contents of the store to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes). 
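+//
+// Records are partitioned by containerLocation(): the volatile location is
+// written with NoSync for speed, accepting that volatile entries may be lost
+// on an unclean shutdown, after which GarbageCollect() reclaims their datadirs.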
+func (r *containerStore) save(saveLocations containerLocations) error { + r.lockfile.AssertLockedForWriting() + for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ { + location := containerLocationFromIndex(locationIndex) + if location&saveLocations == 0 { + continue + } + rpath := r.jsonPath[locationIndex] + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + subsetContainers := make([]*Container, 0, len(r.containers)) + for _, container := range r.containers { + if containerLocation(container) == location { + subsetContainers = append(subsetContainers, container) + } + } + + jdata, err := json.Marshal(&subsetContainers) + if err != nil { + return err + } + var opts *ioutils.AtomicFileWriterOptions + if location == volatileContainerLocation { + opts = &ioutils.AtomicFileWriterOptions{ + NoSync: true, + } + } + if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil { + return err + } } - jdata, err := json.Marshal(&r.containers) + lw, err := r.lockfile.RecordWrite() if err != nil { return err } - defer r.Touch() - return ioutils.AtomicWriteFile(rpath, jdata, 0600) + r.lastWrite = lw + return nil } -func newContainerStore(dir string) (ContainerStore, error) { +// saveFor saves the contents of the store relevant for modifiedContainer to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes). +func (r *containerStore) saveFor(modifiedContainer *Container) error { + return r.save(containerLocation(modifiedContainer)) +} + +func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) { if err := os.MkdirAll(dir, 0700); err != nil { return nil, err } - lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock")) + volatileDir := dir + if transient { + if err := os.MkdirAll(runDir, 0700); err != nil { + return nil, err + } + volatileDir = runDir + } + lockfile, err := lockfile.GetLockFile(filepath.Join(volatileDir, "containers.lock")) if err != nil { return nil, err } - lockfile.Lock() - defer lockfile.Unlock() cstore := containerStore{ - lockfile: lockfile, - dir: dir, + lockfile: lockfile, + dir: dir, + jsonPath: [numContainerLocationIndex]string{ + filepath.Join(dir, "containers.json"), + filepath.Join(volatileDir, "volatile-containers.json"), + }, + containers: []*Container{}, byid: make(map[string]*Container), bylayer: make(map[string]*Container), byname: make(map[string]*Container), } - if err := cstore.Load(); err != nil { + + if err := cstore.startWritingWithReload(false); err != nil { + return nil, err + } + cstore.lastWrite, err = cstore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + defer cstore.stopWriting() + if _, err := cstore.load(true); err != nil { return nil, err } return &cstore, nil } +// Requires startReading or startWriting. func (r *containerStore) lookup(id string) (*Container, bool) { if container, ok := r.byid[id]; ok { return container, ok @@ -287,15 +625,17 @@ func (r *containerStore) lookup(id string) (*Container, bool) { return nil, false } +// Requires startWriting. func (r *containerStore) ClearFlag(id string, flag string) error { container, ok := r.lookup(id) if !ok { return ErrContainerUnknown } delete(container.Flags, flag) - return r.Save() + return r.saveFor(container) } +// Requires startWriting. 
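+// (Well-known flag keys used in this file include mountLabelFlag,
+// processLabelFlag, mountOptsFlag, and volatileFlag; see MountLabel,
+// ProcessLabel, and MountOpts above.)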
func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { container, ok := r.lookup(id) if !ok { @@ -305,9 +645,10 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro container.Flags = make(map[string]interface{}) } container.Flags[flag] = value - return r.Save() + return r.saveFor(container) } +// Requires startWriting. func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) { if id == "" { id = stringid.GenerateRandomID() @@ -321,10 +662,10 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat return nil, ErrDuplicateID } if options.MountOpts != nil { - options.Flags["MountOpts"] = append([]string{}, options.MountOpts...) + options.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...) } if options.Volatile { - options.Flags["Volatile"] = true + options.Flags[volatileFlag] = true } names = dedupeNames(names) for _, name := range names { @@ -338,34 +679,36 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat if err := hasOverlappingRanges(options.GIDMap); err != nil { return nil, err } - if err == nil { - container = &Container{ - ID: id, - Names: names, - ImageID: image, - LayerID: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - BigDataDigests: make(map[string]digest.Digest), - Created: time.Now().UTC(), - Flags: copyStringInterfaceMap(options.Flags), - UIDMap: copyIDMap(options.UIDMap), - GIDMap: copyIDMap(options.GIDMap), - } - r.containers = append(r.containers, container) - r.byid[id] = container - r.idindex.Add(id) - r.bylayer[layer] = container - for _, name := range names { - r.byname[name] = container - } - err = r.Save() - container = copyContainer(container) + container = &Container{ + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: time.Now().UTC(), + Flags: copyStringInterfaceMap(options.Flags), + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + volatileStore: options.Volatile, + } + r.containers = append(r.containers, container) + r.byid[id] = container + // This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // Implementing recovery from an unlikely and unimportant failure here would be too risky. + _ = r.idindex.Add(id) + r.bylayer[layer] = container + for _, name := range names { + r.byname[name] = container } + err = r.saveFor(container) + container = copyContainer(container) return container, err } +// Requires startReading or startWriting. func (r *containerStore) Metadata(id string) (string, error) { if container, ok := r.lookup(id); ok { return container.Metadata, nil @@ -373,31 +716,21 @@ func (r *containerStore) Metadata(id string) (string, error) { return "", ErrContainerUnknown } +// Requires startWriting. func (r *containerStore) SetMetadata(id, metadata string) error { if container, ok := r.lookup(id); ok { container.Metadata = metadata - return r.Save() + return r.saveFor(container) } return ErrContainerUnknown } +// The caller must hold r.inProcessLock for writing. 
func (r *containerStore) removeName(container *Container, name string) { container.Names = stringSliceWithoutValue(container.Names, name) } -// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. -func (r *containerStore) SetNames(id string, names []string) error { - return r.updateNames(id, names, setNames) -} - -func (r *containerStore) AddNames(id string, names []string) error { - return r.updateNames(id, names, addNames) -} - -func (r *containerStore) RemoveNames(id string, names []string) error { - return r.updateNames(id, names, removeNames) -} - +// Requires startWriting. func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error { container, ok := r.lookup(id) if !ok { @@ -418,9 +751,10 @@ func (r *containerStore) updateNames(id string, names []string, op updateNameOpe r.byname[name] = container } container.Names = names - return r.Save() + return r.saveFor(container) } +// Requires startWriting. func (r *containerStore) Delete(id string) error { container, ok := r.lookup(id) if !ok { @@ -435,7 +769,9 @@ func (r *containerStore) Delete(id string) error { } } delete(r.byid, id) - r.idindex.Delete(id) + // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. + _ = r.idindex.Delete(id) delete(r.bylayer, container.LayerID) for _, name := range container.Names { delete(r.byname, name) @@ -448,7 +784,7 @@ func (r *containerStore) Delete(id string) error { r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...) } } - if err := r.Save(); err != nil { + if err := r.saveFor(container); err != nil { return err } if err := os.RemoveAll(r.datadir(id)); err != nil { @@ -457,6 +793,7 @@ func (r *containerStore) Delete(id string) error { return nil } +// Requires startReading or startWriting. func (r *containerStore) Get(id string) (*Container, error) { if container, ok := r.lookup(id); ok { return copyContainer(container), nil @@ -464,6 +801,7 @@ func (r *containerStore) Get(id string) (*Container, error) { return nil, ErrContainerUnknown } +// Requires startReading or startWriting. func (r *containerStore) Lookup(name string) (id string, err error) { if container, ok := r.lookup(name); ok { return container.ID, nil @@ -471,11 +809,13 @@ func (r *containerStore) Lookup(name string) (id string, err error) { return "", ErrContainerUnknown } +// Requires startReading or startWriting. func (r *containerStore) Exists(id string) bool { _, ok := r.lookup(id) return ok } +// Requires startReading or startWriting. func (r *containerStore) BigData(id, key string) ([]byte, error) { if key == "" { return nil, fmt.Errorf("can't retrieve container big data value for empty name: %w", ErrInvalidBigDataName) @@ -484,9 +824,10 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) { if !ok { return nil, ErrContainerUnknown } - return ioutil.ReadFile(r.datapath(c.ID, key)) + return os.ReadFile(r.datapath(c.ID, key)) } +// Requires startWriting. Yes, really, WRITING (see SetBigData). 
 func (r *containerStore) BigDataSize(id, key string) (int64, error) {
 	if key == "" {
 		return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName)
 	}
@@ -495,10 +836,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
 	if !ok {
 		return -1, ErrContainerUnknown
 	}
-	if c.BigDataSizes == nil {
-		c.BigDataSizes = make(map[string]int64)
-	}
-	if size, ok := c.BigDataSizes[key]; ok {
+	if size, ok := c.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
 		return size, nil
 	}
 	if data, err := r.BigData(id, key); err == nil && data != nil {
@@ -517,6 +855,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
 	return -1, ErrSizeUnknown
 }
 
+// Requires startWriting. Yes, really, WRITING (see SetBigData).
 func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
 	if key == "" {
 		return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName)
 	}
@@ -525,10 +864,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
 	if !ok {
 		return "", ErrContainerUnknown
 	}
-	if c.BigDataDigests == nil {
-		c.BigDataDigests = make(map[string]digest.Digest)
-	}
-	if d, ok := c.BigDataDigests[key]; ok {
+	if d, ok := c.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
 		return d, nil
 	}
 	if data, err := r.BigData(id, key); err == nil && data != nil {
@@ -547,6 +883,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
 	return "", ErrDigestUnknown
 }
 
+// Requires startReading or startWriting.
 func (r *containerStore) BigDataNames(id string) ([]string, error) {
 	c, ok := r.lookup(id)
 	if !ok {
@@ -555,6 +892,7 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
 	return copyStringSlice(c.BigDataNames), nil
 }
 
+// Requires startWriting.
 func (r *containerStore) SetBigData(id, key string, data []byte) error {
 	if key == "" {
 		return fmt.Errorf("can't set empty name for container big data item: %w", ErrInvalidBigDataName)
@@ -595,12 +933,13 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
 		save = true
 		}
 		if save {
-			err = r.Save()
+			err = r.saveFor(c)
 		}
 	}
 	return err
 }
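Note why BigDataSize and BigDataDigest now require the write lock even though they look like read-only accessors: on a cache miss they fall back to re-reading the data file and repopulating the cached sizes and digests via SetBigData, which writes the store back to disk. A hedged sketch of the recomputation itself, using only the go-digest API this file already imports (the recompute helper is an illustrative name, not the vendored code):

```go
package main

import (
	"fmt"
	"os"

	digest "github.com/opencontainers/go-digest"
)

// recompute re-derives the size and digest of a big-data item from its
// backing file, the way the methods above fall back to SetBigData when a
// cached value is missing.
func recompute(path string) (int64, digest.Digest, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return -1, "", err
	}
	// digest.Canonical is SHA-256; FromBytes cannot fail.
	return int64(len(data)), digest.Canonical.FromBytes(data), nil
}

func main() {
	size, d, err := recompute(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(size, d)
}
```

+// Requires startWriting.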
func (r *containerStore) Wipe() error { ids := make([]string, 0, len(r.byid)) for id := range r.byid { @@ -613,50 +952,3 @@ func (r *containerStore) Wipe() error { } return nil } - -func (r *containerStore) Lock() { - r.lockfile.Lock() -} - -func (r *containerStore) RecursiveLock() { - r.lockfile.RecursiveLock() -} - -func (r *containerStore) RLock() { - r.lockfile.RLock() -} - -func (r *containerStore) Unlock() { - r.lockfile.Unlock() -} - -func (r *containerStore) Touch() error { - return r.lockfile.Touch() -} - -func (r *containerStore) Modified() (bool, error) { - return r.lockfile.Modified() -} - -func (r *containerStore) IsReadWrite() bool { - return r.lockfile.IsReadWrite() -} - -func (r *containerStore) TouchedSince(when time.Time) bool { - return r.lockfile.TouchedSince(when) -} - -func (r *containerStore) Locked() bool { - return r.lockfile.Locked() -} - -func (r *containerStore) ReloadIfChanged() error { - r.loadMut.Lock() - defer r.loadMut.Unlock() - - modified, err := r.Modified() - if err == nil && modified { - return r.Load() - } - return err -} diff --git a/vendor/github.com/containers/storage/deprecated.go b/vendor/github.com/containers/storage/deprecated.go new file mode 100644 index 00000000000..04972d83880 --- /dev/null +++ b/vendor/github.com/containers/storage/deprecated.go @@ -0,0 +1,216 @@ +package storage + +import ( + "io" + "time" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + digest "github.com/opencontainers/go-digest" +) + +// The type definitions in this file exist ONLY to maintain formal API compatibility. +// DO NOT ADD ANY NEW METHODS TO THESE INTERFACES. + +// ROFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROFileBasedStore interface { + Locker + Load() error + ReloadIfChanged() error +} + +// RWFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWFileBasedStore interface { + Save() error +} + +// FileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type FileBasedStore interface { + ROFileBasedStore + RWFileBasedStore +} + +// ROMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROMetadataStore interface { + Metadata(id string) (string, error) +} + +// RWMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWMetadataStore interface { + SetMetadata(id, metadata string) error +} + +// MetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. 
+type MetadataStore interface { + ROMetadataStore + RWMetadataStore +} + +// ROBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROBigDataStore interface { + BigData(id, key string) ([]byte, error) + BigDataSize(id, key string) (int64, error) + BigDataDigest(id, key string) (digest.Digest, error) + BigDataNames(id string) ([]string, error) +} + +// RWImageBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWImageBigDataStore interface { + SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error +} + +// ContainerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ContainerBigDataStore interface { + ROBigDataStore + SetBigData(id, key string, data []byte) error +} + +// ROLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROLayerBigDataStore interface { + BigData(id, key string) (io.ReadCloser, error) + BigDataNames(id string) ([]string, error) +} + +// RWLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWLayerBigDataStore interface { + SetBigData(id, key string, data io.Reader) error +} + +// LayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type LayerBigDataStore interface { + ROLayerBigDataStore + RWLayerBigDataStore +} + +// FlaggableStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type FlaggableStore interface { + ClearFlag(id string, flag string) error + SetFlag(id string, flag string, value interface{}) error +} + +// ContainerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ContainerStore interface { + FileBasedStore + MetadataStore + ContainerBigDataStore + FlaggableStore + Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) + SetNames(id string, names []string) error + AddNames(id string, names []string) error + RemoveNames(id string, names []string) error + Get(id string) (*Container, error) + Exists(id string) bool + Delete(id string) error + Wipe() error + Lookup(name string) (string, error) + Containers() ([]Container, error) +} + +// ROImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage. 
+// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROImageStore interface { + ROFileBasedStore + ROMetadataStore + ROBigDataStore + Exists(id string) bool + Get(id string) (*Image, error) + Lookup(name string) (string, error) + Images() ([]Image, error) + ByDigest(d digest.Digest) ([]*Image, error) +} + +// ImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ImageStore interface { + ROImageStore + RWFileBasedStore + RWMetadataStore + RWImageBigDataStore + FlaggableStore + Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) + SetNames(id string, names []string) error + AddNames(id string, names []string) error + RemoveNames(id string, names []string) error + Delete(id string) error + Wipe() error +} + +// ROLayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROLayerStore interface { + ROFileBasedStore + ROMetadataStore + ROLayerBigDataStore + Exists(id string) bool + Get(id string) (*Layer, error) + Status() ([][2]string, error) + Changes(from, to string) ([]archive.Change, error) + Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) + DiffSize(from, to string) (int64, error) + Size(name string) (int64, error) + Lookup(name string) (string, error) + LayersByCompressedDigest(d digest.Digest) ([]Layer, error) + LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + Layers() ([]Layer, error) +} + +// LayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. 
+type LayerStore interface { + ROLayerStore + RWFileBasedStore + RWMetadataStore + FlaggableStore + RWLayerBigDataStore + Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error) + CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) + Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) + SetNames(id string, names []string) error + AddNames(id string, names []string) error + RemoveNames(id string, names []string) error + Delete(id string) error + Wipe() error + Mount(id string, options drivers.MountOpts) (string, error) + Unmount(id string, force bool) (bool, error) + Mounted(id string) (int, error) + ParentOwners(id string) (uids, gids []int, err error) + ApplyDiff(to string, diff io.Reader) (int64, error) + ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + CleanupStagingDirectory(stagingDirectory string) error + ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error + DifferTarget(id string) (string, error) + LoadLocked() error + PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index dd5685aca5a..10341d41ac6 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -29,7 +29,6 @@ import ( "fmt" "io" "io/fs" - "io/ioutil" "os" "os/exec" "path" @@ -47,7 +46,7 @@ import ( mountpk "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" - "github.com/opencontainers/runc/libcontainer/userns" + "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/storage" @@ -68,7 +67,7 @@ var ( const defaultPerms = os.FileMode(0555) func init() { - graphdriver.Register("aufs", Init) + graphdriver.MustRegister("aufs", Init) } // Driver contains information about the filesystem mounted. 
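The aufs init() above switches from graphdriver.Register to graphdriver.MustRegister (defined later in this patch, in drivers/driver.go). The rationale: init() cannot return an error, and a duplicate driver name is a programming bug rather than a runtime condition, so panicking during package initialization is the honest failure mode. A simplified sketch of the pattern; the registry, InitFunc signature, and error text here are illustrative stand-ins, not the vendored definitions:

```go
package registry

import "fmt"

// InitFunc mirrors the shape of a driver constructor; the map below is a
// simplified stand-in for graphdriver's package-level registry.
type InitFunc func(home string) (interface{}, error)

var drivers = map[string]InitFunc{}

// Register refuses duplicate names and reports that as an error.
func Register(name string, initFunc InitFunc) error {
	if _, exists := drivers[name]; exists {
		return fmt.Errorf("name already registered %s", name)
	}
	drivers[name] = initFunc
	return nil
}

// MustRegister panics instead, which is the only useful failure mode inside
// init(): a duplicate registration is a programming bug, not a runtime error.
func MustRegister(name string, initFunc InitFunc) {
	if err := Register(name, initFunc); err != nil {
		panic(fmt.Sprintf("failed to register driver %q: %v", name, err))
	}
}
```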
@@ -170,7 +169,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) for _, path := range []string{"mnt", "diff"} { p := filepath.Join(home, path) - entries, err := ioutil.ReadDir(p) + entries, err := os.ReadDir(p) if err != nil { logger.WithError(err).WithField("dir", p).Error("error reading dir entries") continue @@ -200,7 +199,7 @@ func supportsAufs() error { // proc/filesystems for when aufs is supported exec.Command("modprobe", "aufs").Run() - if userns.RunningInUserNS() { + if unshare.IsRootless() { return ErrAufsNested } @@ -252,6 +251,11 @@ func (a *Driver) Exists(id string) bool { return true } +// List layers (not including additional image stores) +func (a *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + // AdditionalImageStores returns additional image stores supported by the driver func (a *Driver) AdditionalImageStores() []string { return nil @@ -730,14 +734,14 @@ func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.M // version of aufs. func useDirperm() bool { enableDirpermLock.Do(func() { - base, err := ioutil.TempDir("", "storage-aufs-base") + base, err := os.MkdirTemp("", "storage-aufs-base") if err != nil { logrus.Errorf("Checking dirperm1: %v", err) return } defer os.RemoveAll(base) - union, err := ioutil.TempDir("", "storage-aufs-union") + union, err := os.MkdirTemp("", "storage-aufs-union") if err != nil { logrus.Errorf("Checking dirperm1: %v", err) return diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go index d2325fc46cd..27e62163312 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/dirs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go @@ -1,17 +1,17 @@ +//go:build linux // +build linux package aufs import ( "bufio" - "io/ioutil" "os" "path" ) // Return all the directories func loadIds(root string) ([]string, error) { - dirs, err := ioutil.ReadDir(root) + dirs, err := os.ReadDir(root) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go index 100e7537a9c..156f4a4f0e2 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/mount.go +++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package aufs diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go deleted file mode 100644 index d030b066378..00000000000 --- a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package aufs - -import "errors" - -// MsRemount declared to specify a non-linux system mount. -const MsRemount = 0 - -func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { - return errors.New("mount is not implemented on this platform") -} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index be44390da50..3d9053297bc 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -6,6 +6,9 @@ package btrfs /* #include #include + +// keep struct field name compatible with btrfs-progs < 6.1. 
+#define max_referenced max_rfer #include #include @@ -18,7 +21,6 @@ import "C" import ( "fmt" "io/fs" - "io/ioutil" "math" "os" "path" @@ -43,7 +45,7 @@ import ( const defaultPerms = os.FileMode(0555) func init() { - graphdriver.Register("btrfs", Init) + graphdriver.MustRegister("btrfs", Init) } type btrfsOptions struct { @@ -383,7 +385,7 @@ func subvolLimitQgroup(path string, size uint64) error { defer closeDir(dir) var args C.struct_btrfs_ioctl_qgroup_limit_args - args.lim.max_referenced = C.__u64(size) + args.lim.max_rfer = C.__u64(size) args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, uintptr(unsafe.Pointer(&args))) @@ -524,7 +526,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { return err } - if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { return err } } @@ -643,7 +645,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { return "", fmt.Errorf("%s: not a directory", dir) } - if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { + if quota, err := os.ReadFile(d.quotasDirID(id)); err == nil { if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { if err := d.enableQuota(); err != nil { return "", err @@ -677,6 +679,11 @@ func (d *Driver) Exists(id string) bool { return err == nil } +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { return nil diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go index f07088887a1..c7d9d3b8453 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux || !cgo // +build !linux !cgo package btrfs diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go index edd8bdab85e..5816139f35e 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/version.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go @@ -1,3 +1,4 @@ +//go:build linux && !btrfs_noversion && cgo // +build linux,!btrfs_noversion,cgo package btrfs diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go index 905e834e354..a61d8fbd9ac 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go @@ -1,4 +1,5 @@ -// +build !linux btrfs_noversion !cgo +//go:build linux && btrfs_noversion && cgo +// +build linux,btrfs_noversion,cgo package btrfs diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index bad654b598c..ca43c3f0574 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ 
b/vendor/github.com/containers/storage/drivers/chown.go @@ -2,6 +2,7 @@ package graphdriver import ( "bytes" + "errors" "fmt" "os" @@ -93,7 +94,7 @@ func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error return err } if len(output) > 0 { - return fmt.Errorf(string(output)) + return errors.New(string(output)) } return nil @@ -114,7 +115,7 @@ func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater { // on-disk owner UIDs and GIDs which are "host" values in the first map with // UIDs and GIDs for "host" values from the second map which correspond to the // same "container" IDs. -func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { +func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) (retErr error) { driver := n.ProtoDriver options := MountOpts{ MountLabel: mountLabel, @@ -123,9 +124,7 @@ func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost if err != nil { return err } - defer func() { - driver.Put(id) - }() + defer driverPut(driver, id, &retErr) return ChownPathByMaps(layerFs, toContainer, toHost) } diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go index a732075fbb1..d6150ceeee3 100644 --- a/vendor/github.com/containers/storage/drivers/chown_darwin.go +++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go @@ -83,7 +83,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai uid, gid = mappedPair.UID, mappedPair.GID } if uid != int(st.Uid) || gid != int(st.Gid) { - cap, err := system.Lgetxattr(path, "security.capability") + capability, err := system.Lgetxattr(path, "security.capability") if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { return fmt.Errorf("%s: %w", os.Args[0], err) } @@ -98,8 +98,8 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai return fmt.Errorf("%s: %w", os.Args[0], err) } } - if cap != nil { - if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil { + if capability != nil { + if err := system.Lsetxattr(path, "security.capability", capability, 0); err != nil { return fmt.Errorf("%s: %w", os.Args[0], err) } } diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go index b92b3b12def..58c0b5daf1a 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go @@ -27,7 +27,6 @@ import ( "github.com/containers/storage/pkg/pools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" - "github.com/opencontainers/runc/libcontainer/userns" "golang.org/x/sys/unix" ) @@ -207,7 +206,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { s.Close() case mode&os.ModeDevice != 0: - if userns.RunningInUserNS() { + if unshare.IsRootless() { // cannot create a device if running in user namespace return nil } diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go index e97523c3578..470e6a72476 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go +++ 
b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
@@ -1,6 +1,7 @@
+//go:build !linux || !cgo
 // +build !linux !cgo
 
-package copy
+package copy //nolint: predeclared
 
 import (
 	"io"
@@ -24,7 +25,7 @@ func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
 }
 
 // CopyRegularToFile copies the content of a file to another
-func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
 	f, err := os.Open(srcPath)
 	if err != nil {
 		return err
@@ -35,6 +36,6 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
 }
 
 // CopyRegular copies the content of a file to another
-func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive // "func name will be used as copy.CopyRegular by other packages, and that stutters"
 	return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath)
 }
diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go
index 3fc45495b2e..01576667616 100644
--- a/vendor/github.com/containers/storage/drivers/counter.go
+++ b/vendor/github.com/containers/storage/drivers/counter.go
@@ -58,6 +58,11 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
 	}
 	infoOp(m)
 	count := m.count
+	if count <= 0 {
+		// If the mounted path has been decremented enough to have no references,
+ delete(c.counts, path) + } c.mu.Unlock() return count } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go index fef039d3fef..96c4cdacb4b 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go @@ -8,7 +8,6 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -154,7 +153,7 @@ func readLVMConfig(root string) (directLVMConfig, error) { var cfg directLVMConfig p := filepath.Join(root, "setup-config.json") - b, err := ioutil.ReadFile(p) + b, err := os.ReadFile(p) if err != nil { if os.IsNotExist(err) { return cfg, nil @@ -166,9 +165,10 @@ func readLVMConfig(root string) (directLVMConfig, error) { if len(b) == 0 { return cfg, nil } - - err = json.Unmarshal(b, &cfg) - return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err) + if err := json.Unmarshal(b, &cfg); err != nil { + return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err) + } + return cfg, nil } func writeLVMConfig(root string, cfg directLVMConfig) error { @@ -177,7 +177,7 @@ func writeLVMConfig(root string, cfg directLVMConfig) error { if err != nil { return fmt.Errorf("marshalling direct lvm config: %w", err) } - if err := ioutil.WriteFile(p, b, 0600); err != nil { + if err := os.WriteFile(p, b, 0600); err != nil { return fmt.Errorf("writing direct lvm config to file: %w", err) } return nil @@ -241,7 +241,7 @@ func setupDirectLVM(cfg directLVMConfig) error { } profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) - err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) + err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) if err != nil { return fmt.Errorf("writing storage thinp autoextend profile: %w", err) } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index 6989a438124..697a16fda73 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -9,7 +9,6 @@ import ( "fmt" "io" "io/fs" - "io/ioutil" "os" "os/exec" "path" @@ -331,7 +330,7 @@ func (devices *DeviceSet) removeMetadata(info *devInfo) error { // Given json data and file path, write it to disk func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { - tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + tmpFile, err := os.CreateTemp(devices.metadataDir(), ".tmp") if err != nil { return fmt.Errorf("devmapper: Error creating metadata file: %s", err) } @@ -630,7 +629,7 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { func (devices *DeviceSet) migrateOldMetaData() error { // Migrate old metadata file - jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + jsonData, err := os.ReadFile(devices.oldMetadataFile()) if err != nil && !os.IsNotExist(err) { return err } @@ -955,7 +954,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf func (devices *DeviceSet) loadMetadata(hash string) *devInfo { info := &devInfo{Hash: hash, devices: devices} - jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + jsonData, err := 
os.ReadFile(devices.metadataFile(info)) if err != nil { logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) return nil @@ -1276,11 +1275,11 @@ func (devices *DeviceSet) setupBaseImage() error { } func setCloseOnExec(name string) { - fileInfos, _ := ioutil.ReadDir("/proc/self/fd") - for _, i := range fileInfos { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + fileEntries, _ := os.ReadDir("/proc/self/fd") + for _, e := range fileEntries { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", e.Name())) if link == name { - fd, err := strconv.Atoi(i.Name()) + fd, err := strconv.Atoi(e.Name()) if err == nil { unix.CloseOnExec(fd) } @@ -1370,7 +1369,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { } func (devices *DeviceSet) loadTransactionMetaData() error { - jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + jsonData, err := os.ReadFile(devices.transactionMetaFile()) if err != nil { // There is no active transaction. This will be the case // during upgrade. @@ -1451,7 +1450,7 @@ func (devices *DeviceSet) processPendingTransaction() error { } func (devices *DeviceSet) loadDeviceSetMetaData() error { - jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + jsonData, err := os.ReadFile(devices.deviceSetMetaFile()) if err != nil { // For backward compatibility return success if file does // not exist. @@ -2258,7 +2257,7 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { } func (devices *DeviceSet) unmountAndDeactivateAll(dir string) { - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) if err != nil { logrus.Warnf("devmapper: unmountAndDeactivate: %s", err) return diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go index 418b9e61087..f85fb947907 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devmapper diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go index d2f165e26de..8b3ee51df77 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -1,10 +1,10 @@ +//go:build linux && cgo // +build linux,cgo package devmapper import ( "fmt" - "io/ioutil" "os" "path" "strconv" @@ -23,7 +23,7 @@ import ( const defaultPerms = os.FileMode(0555) func init() { - graphdriver.Register("devicemapper", Init) + graphdriver.MustRegister("devicemapper", Init) } // Driver contains the device set mounted and the home directory @@ -227,7 +227,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { // Create an "id" file with the container/image id in it to help reconstruct this in case // of later problems - if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + if err := os.WriteFile(idFile, []byte(id), 0600); err != nil { d.ctr.Decrement(mp) d.DeviceSet.UnmountDevice(id, mp) return "", err @@ -267,6 +267,11 @@ func (d *Driver) Exists(id string) bool { return d.DeviceSet.HasDevice(id) } +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + return nil, 
graphdriver.ErrNotSupported
+}
+
 // AdditionalImageStores returns additional image stores supported by the driver
 func (d *Driver) AdditionalImageStores() []string {
 	return nil
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
index 54db6ab4aea..52f0e863e5c 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
@@ -1,3 +1,6 @@
+//go:build linux && cgo
+// +build linux,cgo
+
 package devmapper
 
 import jsoniter "github.com/json-iterator/go"
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go
index 41e73faf525..db903ca5526 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/mount.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/mount.go
@@ -1,3 +1,4 @@
+//go:build linux && cgo
 // +build linux,cgo
 
 package devmapper
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index d4f92e682ce..b3b0614fd38 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -8,13 +8,12 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/sirupsen/logrus"
-	"github.com/vbatts/tar-split/tar/storage"
-
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
 	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
 )
 
 // FsMagic unsigned id of the filesystem in use.
@@ -39,7 +38,7 @@ var (
 	ErrLayerUnknown = errors.New("unknown layer")
 )
 
-//CreateOpts contains optional arguments for Create() and CreateReadWrite()
+// CreateOpts contains optional arguments for Create() and CreateReadWrite()
 // methods.
 type CreateOpts struct {
 	MountLabel string
@@ -48,13 +47,13 @@ type CreateOpts struct {
 	ignoreChownErrors bool
 }
 
-// MountOpts contains optional arguments for LayerStope.Mount() methods.
+// MountOpts contains optional arguments for Driver.Get() methods.
 type MountOpts struct {
 	// Mount label is the MAC Labels to assign to mount point (SELINUX)
 	MountLabel string
 	// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
-	UidMaps []idtools.IDMap // nolint: golint
-	GidMaps []idtools.IDMap // nolint: golint
+	UidMaps []idtools.IDMap //nolint: golint,revive
+	GidMaps []idtools.IDMap //nolint: golint
 	Options []string
 
 	// Volatile specifies whether the container storage can be optimized
@@ -110,6 +109,9 @@ type ProtoDriver interface {
 	// Exists returns whether a filesystem layer with the specified
 	// ID exists on this driver.
 	Exists(id string) bool
+	// Returns a list of layer ids that exist on this driver (does not include
+	// additional storage layers). Not supported by all backends.
+	ListLayers() ([]string, error)
 	// Status returns a set of key-value pairs which give low
 	// level diagnostic status about this driver.
 	Status() [][2]string
@@ -279,6 +281,14 @@ func init() {
 	drivers = make(map[string]InitFunc)
 }
 
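ProtoDriver gains a ListLayers method, so backends that can enumerate their own layers may report them while the drivers above simply return graphdriver.ErrNotSupported. Callers can treat that error as a capability probe; a small sketch under that assumption, where listOrScan and its scan fallback are hypothetical helpers rather than vendored code:

```go
package driverutil

import (
	"errors"

	graphdriver "github.com/containers/storage/drivers"
)

// listOrScan prefers the driver's own layer enumeration and falls back to a
// caller-supplied scan when the backend reports that it cannot enumerate.
func listOrScan(d graphdriver.ProtoDriver, scan func() ([]string, error)) ([]string, error) {
	layers, err := d.ListLayers()
	if errors.Is(err, graphdriver.ErrNotSupported) {
		return scan() // e.g. walk the driver's home directory
	}
	return layers, err
}
```

+// MustRegister registers an InitFunc for the driver, or panics.
+// It is suitable for use in a package’s init() functions.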
+func MustRegister(name string, initFunc InitFunc) { + if err := Register(name, initFunc); err != nil { + panic(fmt.Sprintf("failed to register containers/storage graph driver %q: %v", name, err)) + } +} + // Register registers an InitFunc for the driver. func Register(name string, initFunc InitFunc) error { if _, exists := drivers[name]; exists { @@ -312,6 +322,7 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) { type Options struct { Root string RunRoot string + DriverPriority []string DriverOptions []string UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap @@ -327,9 +338,18 @@ func New(name string, config Options) (Driver, error) { // Guess for prior driver driversMap := scanPriorDrivers(config.Root) - for _, name := range priority { - if name == "vfs" { - // don't use vfs even if there is state present. + + // use the supplied priority list unless it is empty + prioList := config.DriverPriority + if len(prioList) == 0 { + prioList = priority + } + + for _, name := range prioList { + if name == "vfs" && len(config.DriverPriority) == 0 { + // don't use vfs even if there is state present and vfs + // has not been explicitly added to the override driver + // priority list continue } if _, prior := driversMap[name]; prior { @@ -362,7 +382,7 @@ func New(name string, config Options) (Driver, error) { } // Check for priority drivers first - for _, name := range priority { + for _, name := range prioList { driver, err := getBuiltinDriver(name, config.Root, config) if err != nil { if isDriverNotSupported(err) { @@ -405,3 +425,21 @@ func scanPriorDrivers(root string) map[string]bool { } return driversMap } + +// driverPut is driver.Put, but errors are handled either by updating mainErr or just logging. +// Typical usage: +// +// func …(…) (err error) { +// … +// defer driverPut(driver, id, &err) +// } +func driverPut(driver ProtoDriver, id string, mainErr *error) { + if err := driver.Put(id); err != nil { + err = fmt.Errorf("unmounting layer %s: %w", id, err) + if *mainErr == nil { + *mainErr = err + } else { + logrus.Errorf(err.Error()) + } + } +} diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index 7c527d279a6..b9e57a60d6c 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -7,6 +7,7 @@ import ( "path/filepath" "github.com/containers/storage/pkg/mount" + "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -127,9 +128,14 @@ var ( // GetFSMagic returns the filesystem id given the path. 
func GetFSMagic(rootpath string) (FsMagic, error) { var buf unix.Statfs_t - if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil { + path := filepath.Dir(rootpath) + if err := unix.Statfs(path, &buf); err != nil { return 0, err } + + if _, ok := FsNames[FsMagic(buf.Type)]; !ok { + logrus.Debugf("Unknown filesystem type %#x reported for %s", buf.Type, path) + } return FsMagic(buf.Type), nil } diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go index 174fa9670bf..e3293091662 100644 --- a/vendor/github.com/containers/storage/drivers/driver_solaris.go +++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go @@ -1,3 +1,4 @@ +//go:build solaris && cgo // +build solaris,cgo package graphdriver diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go index 3932c3ea5c9..8119d9a6ccd 100644 --- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows && !freebsd && !solaris && !darwin // +build !linux,!windows,!freebsd,!solaris,!darwin package graphdriver diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index b619317e057..6b2496ec562 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -10,7 +10,7 @@ import ( "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" - "github.com/opencontainers/runc/libcontainer/userns" + "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" ) @@ -33,10 +33,11 @@ type NaiveDiffDriver struct { // NewNaiveDiffDriver returns a fully functional driver that wraps the // given ProtoDriver and adds the capability of the following methods which // it may or may not support on its own: -// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) -// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) -// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) -// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) +// +// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) +// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) +// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) +// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver { return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater} } @@ -64,7 +65,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare defer func() { if err != nil { - driver.Put(id) + driverPut(driver, id, &err) } }() @@ -79,7 +80,7 @@ func (gdw *NaiveDiffDriver) Diff(id 
string, idMappings *idtools.IDMappings, pare } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() - driver.Put(id) + driverPut(driver, id, &err) return err }), nil } @@ -89,7 +90,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare if err != nil { return nil, err } - defer driver.Put(parent) + defer driverPut(driver, parent, &err) changes, err := archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) if err != nil { @@ -103,20 +104,20 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() - driver.Put(id) + driverPut(driver, id, &err) // NaiveDiffDriver compares file metadata with parent layers. Parent layers // are extracted from tar's with full second precision on modified time. // We need this hack here to make sure calls within same second receive // correct result. - time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second))) return err }), nil } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. -func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { +func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ []archive.Change, retErr error) { driver := gdw.ProtoDriver if idMappings == nil { @@ -133,7 +134,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p if err != nil { return nil, err } - defer driver.Put(id) + defer driverPut(driver, id, &retErr) parentFs := "" @@ -146,7 +147,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p if err != nil { return nil, err } - defer driver.Put(parent) + defer driverPut(driver, parent, &retErr) } return archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) @@ -170,16 +171,16 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) if err != nil { return } - defer driver.Put(id) + defer driverPut(driver, id, &err) defaultForceMask := os.FileMode(0700) - var forceMask *os.FileMode = nil + var forceMask *os.FileMode // = nil if runtime.GOOS == "darwin" { forceMask = &defaultForceMask } tarOptions := &archive.TarOptions{ - InUserNS: userns.RunningInUserNS(), + InUserNS: unshare.IsRootless(), IgnoreChownErrors: options.IgnoreChownErrors, ForceMask: forceMask, } @@ -223,7 +224,7 @@ func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, if err != nil { return } - defer driver.Put(id) + defer driverPut(driver, id, &err) return archive.ChangesSize(layerFs, changes), nil } diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index c43ab4c1e25..0a0ad7dd553 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -6,7 +6,6 @@ package overlay import ( "errors" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -27,7 +26,7 @@ import ( // directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR. // When these exist naive diff should be used. 
func doesSupportNativeDiff(d, mountOpts string) error { - td, err := ioutil.TempDir(d, "opaque-bug-check") + td, err := os.MkdirTemp(d, "opaque-bug-check") if err != nil { return err } @@ -82,7 +81,7 @@ func doesSupportNativeDiff(d, mountOpts string) error { }() // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" - if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { return fmt.Errorf("failed to write to merged directory: %w", err) } @@ -121,7 +120,7 @@ func doesSupportNativeDiff(d, mountOpts string) error { // copying up a file from a lower layer unless/until its contents are being // modified func doesMetacopy(d, mountOpts string) (bool, error) { - td, err := ioutil.TempDir(d, "metacopy-check") + td, err := os.MkdirTemp(d, "metacopy-check") if err != nil { return false, err } @@ -158,7 +157,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) { } if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { if errors.Is(err, unix.EINVAL) { - logrus.Info("metacopy option not supported on this kernel", mountOpts) + logrus.Infof("overlay: metacopy option not supported on this kernel, checked using options %q", mountOpts) return false, nil } return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err) @@ -186,7 +185,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) { // doesVolatile checks if the filesystem supports the "volatile" mount option func doesVolatile(d string) (bool, error) { - td, err := ioutil.TempDir(d, "volatile-check") + td, err := os.MkdirTemp(d, "volatile-check") if err != nil { return false, err } @@ -224,7 +223,7 @@ func doesVolatile(d string) (bool, error) { // supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of // a idmapped lower layer. 
func supportsIdmappedLowerLayers(home string) (bool, error) { - layerDir, err := ioutil.TempDir(home, "compat") + layerDir, err := os.MkdirTemp(home, "compat") if err != nil { return false, err } diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go index 6d7913cbfab..bec455dd4f8 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check_116.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go @@ -1,14 +1,17 @@ -// +build go1.16 +//go:build linux +// +build linux package overlay import ( + "errors" "io/fs" "path/filepath" "strings" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" ) func scanForMountProgramIndicators(home string) (detected bool, err error) { @@ -26,7 +29,7 @@ func scanForMountProgramIndicators(home string) (detected bool, err error) { } if d.IsDir() { xattrs, err := system.Llistxattr(path) - if err != nil { + if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { return err } for _, xattr := range xattrs { diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go index 30423e363a8..0b7c868ac66 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go +++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go @@ -5,8 +5,8 @@ package overlay import ( "fmt" - "io/ioutil" "os" + "runtime" "syscall" "unsafe" @@ -21,17 +21,6 @@ type attr struct { userNs uint64 } -const ( - // _MOUNT_ATTR_IDMAP - Idmap mount to @userns_fd in struct mount_attr - _MOUNT_ATTR_IDMAP = 0x00100000 //nolint:golint - - // _OPEN_TREE_CLONE - Clone the source path mount - _OPEN_TREE_CLONE = 0x00000001 //nolint:golint - - // _MOVE_MOUNT_F_EMPTY_PATH - Move the path referenced by the fd - _MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 //nolint:golint -) - // openTree is a wrapper for the open_tree syscall func openTree(path string, flags int) (fd int, err error) { var _p0 *byte @@ -61,7 +50,7 @@ func moveMount(fdTree int, target string) (err error) { return err } - flags := _MOVE_MOUNT_F_EMPTY_PATH + flags := unix.MOVE_MOUNT_F_EMPTY_PATH _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT), uintptr(fdTree), uintptr(unsafe.Pointer(_p1)), @@ -98,14 +87,14 @@ func createIDMappedMount(source, target string, pid int) error { } var attr attr - attr.attrSet = _MOUNT_ATTR_IDMAP + attr.attrSet = unix.MOUNT_ATTR_IDMAP attr.attrClr = 0 attr.propagation = 0 attr.userNs = uint64(userNsFile.Fd()) defer userNsFile.Close() - targetDirFd, err := openTree(source, _OPEN_TREE_CLONE|unix.AT_RECURSIVE) + targetDirFd, err := openTree(source, unix.OPEN_TREE_CLONE) if err != nil { return err } @@ -124,7 +113,14 @@ func createIDMappedMount(source, target string, pid int) error { // createUsernsProcess forks the current process and creates a user namespace using the specified // mappings. It returns the pid of the new process. 
func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) { - pid, _, err := syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0) + var pid uintptr + var err syscall.Errno + + if runtime.GOARCH == "s390x" { + pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), 0, unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0) + } else { + pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0) + } if err != 0 { return -1, nil, err } @@ -144,7 +140,7 @@ func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, for _, m := range idmap { mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) } - return ioutil.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600) + return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600) } if err := writeMappings("uid_map", uidMaps); err != nil { cleanupFunc() diff --git a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go index 2a1e9d0cc1a..bedda35078e 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go +++ b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go @@ -1,3 +1,6 @@ +//go:build linux +// +build linux + package overlay import jsoniter "github.com/json-iterator/go" diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index cf37f800797..de47951d4a1 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -8,14 +8,16 @@ import ( "flag" "fmt" "os" + "path/filepath" "runtime" + "strings" "github.com/containers/storage/pkg/reexec" "golang.org/x/sys/unix" ) func init() { - reexec.Register("storage-mountfrom", mountFromMain) + reexec.Register("storage-mountfrom", mountOverlayFromMain) } func fatal(err error) { @@ -31,7 +33,7 @@ type mountOptions struct { Flag uint32 } -func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { +func mountOverlayFrom(dir, device, target, mType string, flags uintptr, label string) error { options := &mountOptions{ Device: device, Target: target, @@ -67,7 +69,7 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e } // mountfromMain is the entry-point for storage-mountfrom on re-exec. -func mountFromMain() { +func mountOverlayFromMain() { runtime.LockOSThread() flag.Parse() @@ -77,11 +79,96 @@ func mountFromMain() { fatal(err) } - if err := os.Chdir(flag.Arg(0)); err != nil { + // Mount the arguments passed from the specified directory. Some of the + // paths mentioned in the values we pass to the kernel are relative to + // the specified directory. + homedir := flag.Arg(0) + if err := os.Chdir(homedir); err != nil { fatal(err) } - if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + pageSize := unix.Getpagesize() + if len(options.Label) < pageSize { + if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + fatal(err) + } + os.Exit(0) + } + + // Those arguments still took up too much space. Open the diff + // directories and use their descriptor numbers as lowers, using + // /proc/self/fd as the current directory. 
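The s390x branch just added exists because s390 is the one Linux port where clone(2) takes the stack pointer first and the flags word second; everywhere else the flags come first. The quirk in isolation (an illustration only: raw clone from Go needs the same care with locked threads and child handling as the surrounding code):

package sketch

import (
	"runtime"
	"syscall"

	"golang.org/x/sys/unix"
)

// rawCloneNewUserns issues clone(2) with CLONE_NEWUSER, accounting for the
// s390x argument order. In the child it returns 0, so callers must handle
// both sides, as createUsernsProcess does above.
func rawCloneNewUserns() (int, error) {
	flags := uintptr(unix.CLONE_NEWUSER) | uintptr(unix.SIGCHLD)
	var pid uintptr
	var errno syscall.Errno
	if runtime.GOARCH == "s390x" {
		// s390: clone(child_stack, flags, ...)
		pid, _, errno = syscall.Syscall6(uintptr(unix.SYS_CLONE), 0, flags, 0, 0, 0, 0)
	} else {
		// other ports: clone(flags, child_stack, ...)
		pid, _, errno = syscall.Syscall6(uintptr(unix.SYS_CLONE), flags, 0, 0, 0, 0, 0)
	}
	if errno != 0 {
		return -1, errno
	}
	return int(pid), nil
}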
+ + // Split out the various options, since we need to manipulate the + // paths, but we don't want to mess with other options. + var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string + for _, arg := range strings.Split(options.Label, ",") { + kv := strings.SplitN(arg, "=", 2) + switch kv[0] { + case "upperdir": + upperk = "upperdir=" + upperv = kv[1] + case "workdir": + workk = "workdir=" + workv = kv[1] + case "lowerdir": + lowerk = "lowerdir=" + lowerv = kv[1] + case "label": + labelk = "label=" + labelv = kv[1] + default: + if others == "" { + others = arg + } else { + others = others + "," + arg + } + } + } + + // Make sure upperdir, workdir, and the target are absolute paths. + if upperv != "" && !filepath.IsAbs(upperv) { + upperv = filepath.Join(homedir, upperv) + } + if workv != "" && !filepath.IsAbs(workv) { + workv = filepath.Join(homedir, workv) + } + if !filepath.IsAbs(options.Target) { + options.Target = filepath.Join(homedir, options.Target) + } + + // Get a descriptor for each lower, and use that descriptor's name as + // the new value for the list of lowers, because it's shorter. + if lowerv != "" { + lowers := strings.Split(lowerv, ":") + for i := range lowers { + lowerFd, err := unix.Open(lowers[i], unix.O_RDONLY, 0) + if err != nil { + fatal(err) + } + lowers[i] = fmt.Sprintf("%d", lowerFd) + } + lowerv = strings.Join(lowers, ":") + } + + // Reconstruct the Label field. + options.Label = upperk + upperv + "," + workk + workv + "," + lowerk + lowerv + "," + labelk + labelv + "," + others + options.Label = strings.ReplaceAll(options.Label, ",,", ",") + + // Okay, try this, if we managed to make the arguments fit. + var err error + if len(options.Label) < pageSize { + if err := os.Chdir("/proc/self/fd"); err != nil { + fatal(err) + } + err = unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label) + } else { + err = fmt.Errorf("cannot mount layer, mount data %q too large %d >= page size %d", options.Label, len(options.Label), pageSize) + } + + // Clean up. + if err != nil { + fmt.Fprintf(os.Stderr, "creating overlay mount to %s, mount_data=%q\n", options.Target, options.Label) fatal(err) } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 6bc8343f478..e33bf16db23 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -9,7 +9,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "os/exec" "path" @@ -18,6 +17,7 @@ import ( "strings" "sync" "syscall" + "unicode" graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/drivers/overlayutils" @@ -34,7 +34,6 @@ import ( units "github.com/docker/go-units" "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/runc/libcontainer/userns" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" @@ -75,21 +74,23 @@ const ( // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount -// syscall. A hard upper limit of 128 lower layers is enforced to ensure +// syscall. A hard upper limit of 500 lower layers is enforced to ensure // that mounts do not fail due to length. 
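To make the saving concrete: a lower referenced through the link directory costs roughly 29 bytes of mount data ("l/" plus a 26-character id plus ":"), while a descriptor name under /proc/self/fd is just a few digits. A toy version of the shortening step (hypothetical helper, not the re-exec code above):

package sketch

import (
	"strconv"
	"strings"

	"golang.org/x/sys/unix"
)

// shortLowerdir opens each lower directory and rewrites the lowerdir option
// in terms of the descriptor numbers, which are valid path names once the
// process has chdir'd to /proc/self/fd. The descriptors must stay open until
// after the mount; the caller owns closing them.
func shortLowerdir(lowers []string) (string, []int, error) {
	fds := make([]int, 0, len(lowers))
	names := make([]string, 0, len(lowers))
	for _, dir := range lowers {
		fd, err := unix.Open(dir, unix.O_RDONLY, 0)
		if err != nil {
			for _, f := range fds {
				unix.Close(f) // unwind on failure
			}
			return "", nil, err
		}
		fds = append(fds, fd)
		names = append(names, strconv.Itoa(fd))
	}
	return "lowerdir=" + strings.Join(names, ":"), fds, nil
}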
const ( linkDir = "l" lowerFile = "lower" - maxDepth = 128 + maxDepth = 500 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation - // is true (512 is a buffer for label metadata). - // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) + // is true (512 is a buffer for label metadata, 128 is the + // number of lowers we want to be able to use without having + // to use bind mounts to get all the way to the kernel limit). + // ((idLength + len(linkDir) + 1) * 128) <= (pageSize - 512) idLength = 26 ) @@ -142,8 +143,8 @@ var ( ) func init() { - graphdriver.Register("overlay", Init) - graphdriver.Register("overlay2", Init) + graphdriver.MustRegister("overlay", Init) + graphdriver.MustRegister("overlay2", Init) } func hasMetacopyOption(opts []string) bool { @@ -311,9 +312,11 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) if err != nil { return nil, err } - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFs = fsName + fsName, ok := graphdriver.FsNames[fsMagic] + if !ok { + return nil, fmt.Errorf("filesystem type %#x reported for %s is not supported with 'overlay': %w", fsMagic, filepath.Dir(home), graphdriver.ErrIncompatibleFS) } + backingFs = fsName runhome := filepath.Join(options.RunRoot, filepath.Base(home)) rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) @@ -347,16 +350,16 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. 
Add it to storage.conf to silence this warning", m) } - if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil { + if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil { return nil, err } } else { if opts.forceMask != nil { return nil, errors.New("'force_mask' is supported only with 'mount_program'") } - // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + // check if they are running over btrfs, aufs, overlay, or ecryptfs switch fsMagic { - case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + case graphdriver.FsMagicAufs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: return nil, fmt.Errorf("'overlay' is not supported over %s, a mount_program is required: %w", backingFs, graphdriver.ErrIncompatibleFS) } if unshare.IsRootless() && isNetworkFileSystem(fsMagic) { @@ -580,11 +583,11 @@ func cachedFeatureSet(feature string, set bool) string { } func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) { - content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true))) + content, err := os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true))) if err == nil { return true, string(content), nil } - content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false))) + content, err = os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false))) if err == nil { return false, string(content), nil } @@ -608,7 +611,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { } var contents string - flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home)) + flagContent, err := os.ReadFile(getMountProgramFlagFile(home)) if err == nil { contents = strings.TrimSpace(string(flagContent)) } @@ -621,7 +624,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { if err != nil && !os.IsNotExist(err) { return false, err } - if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) { + if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) { return false, err } if needsMountProgram { @@ -657,7 +660,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI logLevel = logrus.DebugLevel } - layerDir, err := ioutil.TempDir(home, "compat") + layerDir, err := os.MkdirTemp(home, "compat") if err != nil { patherr, ok := err.(*os.PathError) if ok && patherr.Err == syscall.ENOSPC { @@ -716,7 +719,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI logrus.Debugf("overlay: test mount with multiple lowers succeeded") return supportsDType, nil } - logrus.Debugf("overlay: test mount with multiple lowers failed %v", err) + logrus.Debugf("overlay: test mount with multiple lowers failed: %v", err) } flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir) if selinux.GetEnabled() { @@ -728,7 +731,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower") return supportsDType, fmt.Errorf("kernel too old to provide multiple lowers feature for overlay: %w", graphdriver.ErrNotSupported) } - logrus.Debugf("overlay: test mount with a 
single lower failed %v", err) + logrus.Debugf("overlay: test mount with a single lower failed: %v", err) } logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home) return supportsDType, fmt.Errorf("'overlay' is not supported over %s at %q: %w", backingFs, home, graphdriver.ErrIncompatibleFS) @@ -1009,7 +1012,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } // Write link id to link file - if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } @@ -1030,7 +1033,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable return err } if lower != "" { - if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } @@ -1073,7 +1076,7 @@ func (d *Driver) getLower(parent string) (string, error) { } // Read Parent link fileA - parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) + parentLink, err := os.ReadFile(path.Join(parentDir, "link")) if err != nil { if !os.IsNotExist(err) { return "", err @@ -1082,14 +1085,14 @@ func (d *Driver) getLower(parent string) (string, error) { if err := d.recreateSymlinks(); err != nil { return "", fmt.Errorf("recreating the links: %w", err) } - parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link")) + parentLink, err = os.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } } lowers := []string{path.Join(linkDir, string(parentLink))} - parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) @@ -1118,7 +1121,7 @@ func (d *Driver) dir2(id string) (string, bool) { func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string - lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + lowers, err := os.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lower := d.dir(s) @@ -1187,7 +1190,7 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { dir := d.dir(id) - lid, err := ioutil.ReadFile(path.Join(dir, "link")) + lid, err := os.ReadFile(path.Join(dir, "link")) if err == nil { if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logrus.Debugf("Failed to remove link: %v", err) @@ -1199,6 +1202,9 @@ func (d *Driver) Remove(id string) error { if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } + if d.quotaCtl != nil { + d.quotaCtl.ClearQuota(dir) + } return nil } @@ -1210,7 +1216,7 @@ func (d *Driver) recreateSymlinks() error { const maxIterations = 10 // List all the directories under the home directory - dirs, err := ioutil.ReadDir(d.home) + dirs, err := os.ReadDir(d.home) if err != nil { return fmt.Errorf("reading driver home directory %q: %w", d.home, err) } @@ -1229,11 +1235,11 @@ func (d *Driver) recreateSymlinks() error { // the layer's "link" file that points to the layer's "diff" directory. 
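cachedFeatureCheck and cachedFeatureSet, used above, persist the outcome of each kernel probe as a "<feature>-true" or "<feature>-false" marker file under the per-boot run directory, so later processes can skip the mount-heavy probe; because the markers live under RunRoot, the cache naturally resets on reboot, when the kernel and therefore the answer may change. The idea reduced to a sketch with hypothetical helper names (the real check also stores diagnostic text in the marker):

package sketch

import (
	"fmt"
	"os"
	"path/filepath"
)

// readCachedFeature returns (supported, found): found is false when no
// earlier probe has been recorded under runhome.
func readCachedFeature(runhome, feature string) (supported, found bool) {
	if _, err := os.Stat(filepath.Join(runhome, feature+"-true")); err == nil {
		return true, true
	}
	if _, err := os.Stat(filepath.Join(runhome, feature+"-false")); err == nil {
		return false, true
	}
	return false, false
}

// writeCachedFeature records a probe result for later processes.
func writeCachedFeature(runhome, feature string, supported bool) error {
	name := fmt.Sprintf("%s-%t", feature, supported)
	return os.WriteFile(filepath.Join(runhome, name), []byte{}, 0o600)
}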
for _, dir := range dirs { // Skip over the linkDir and anything that is not a directory - if dir.Name() == linkDir || !dir.Mode().IsDir() { + if dir.Name() == linkDir || !dir.IsDir() { continue } // Read the "link" file under each layer to get the name of the symlink - data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) + data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link")) if err != nil { errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err)) continue @@ -1258,7 +1264,7 @@ func (d *Driver) recreateSymlinks() error { linkDirFullPath := filepath.Join(d.home, "l") // Now check if we somehow lost a "link" file, by making sure // that each symlink we have corresponds to one. - links, err := ioutil.ReadDir(linkDirFullPath) + links, err := os.ReadDir(linkDirFullPath) if err != nil { errs = multierror.Append(errs, err) continue @@ -1288,11 +1294,11 @@ func (d *Driver) recreateSymlinks() error { // it has the basename of our symlink in it. targetID := targetComponents[1] linkFile := filepath.Join(d.dir(targetID), "link") - data, err := ioutil.ReadFile(linkFile) + data, err := os.ReadFile(linkFile) if err != nil || string(data) != link.Name() { // NOTE: If two or more links point to the same target, we will update linkFile // with every value of link.Name(), and set madeProgress = true every time. - if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { + if err := os.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err)) continue } @@ -1312,7 +1318,7 @@ func (d *Driver) recreateSymlinks() error { } // Get creates and mounts the required file system for the given id and returns the mount path. -func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { return d.get(id, false, options) } @@ -1346,7 +1352,16 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } if !d.usingMetacopy { if hasMetacopyOption(optsList) { - logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel") + if d.options.mountProgram == "" { + release := "" + var uts unix.Utsname + if err := unix.Uname(&uts); err == nil { + release = " " + string(uts.Release[:]) + " " + string(uts.Version[:]) + } + logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel"+release) + } else { + logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it") + } } optsList = stripOption(optsList, "metacopy=on") } @@ -1358,7 +1373,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } } - lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + lowers, err := os.ReadFile(path.Join(dir, lowerFile)) if err != nil && !os.IsNotExist(err) { return "", err } @@ -1367,27 +1382,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return "", errors.New("max depth exceeded") } - // absLowers is the list of lowers as absolute paths, which works well with additional stores. + // absLowers is the list of lowers as absolute paths. absLowers := []string{} - // relLowers is the list of lowers as paths relative to the driver's home directory. - relLowers := []string{} - // Check if $link/../diff{1-*} exist. 
If they do, add them, in order, as the front of the lowers - // lists that we're building. "diff" itself is the upper, so it won't be in the lists. - link, err := ioutil.ReadFile(path.Join(dir, "link")) - if err != nil { - if !os.IsNotExist(err) { - return "", err - } - logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(dir, "link")) - if err := d.recreateSymlinks(); err != nil { - return "", fmt.Errorf("recreating the links: %w", err) - } - link, err = ioutil.ReadFile(path.Join(dir, "link")) - if err != nil { - return "", err - } - } diffN := 1 perms := defaultPerms if d.options.forceMask != nil { @@ -1401,7 +1398,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } for err == nil { absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) - relLowers = append(relLowers, dumbJoin(linkDir, string(link), "..", nameWithSuffix("diff", diffN))) diffN++ st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) if err == nil && !permsKnown { @@ -1426,6 +1422,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO perms = os.FileMode(st2.Mode()) permsKnown = true } + l = lower break } lower = "" @@ -1450,12 +1447,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO lower = newpath } absLowers = append(absLowers, lower) - relLowers = append(relLowers, l) diffN = 1 _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) for err == nil { absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) - relLowers = append(relLowers, dumbJoin(l, "..", nameWithSuffix("diff", diffN))) diffN++ _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) } @@ -1463,7 +1458,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if len(absLowers) == 0 { absLowers = append(absLowers, path.Join(dir, "empty")) - relLowers = append(relLowers, path.Join(id, "empty")) } // user namespace requires this to move a directory from lower to upper. rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) @@ -1597,28 +1591,23 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return nil } } else if len(mountData) >= pageSize { - // Use relative paths and mountFrom when the mount data has exceeded - // the page size. The mount syscall fails if the mount data cannot - // fit within a page and relative links make the mount data much - // smaller at the expense of requiring a fork exec to chroot. + // Use mountFrom when the mount data has exceeded the page size. The mount syscall fails if + // the mount data cannot fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chdir(). 
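The hard failure deleted in this hunk ("mount label too large") is not gone, it has moved: the re-exec helper shown earlier first tries to shrink the data via /proc/self/fd and only then reports the overflow. The invariant both sides enforce is simply that mount(2) rejects option data of a page or more:

package sketch

import "golang.org/x/sys/unix"

// mountDataFits reports whether formatted mount data can be handed to
// mount(2), which refuses data that does not fit within one page.
func mountDataFits(data string) bool {
	return len(data) < unix.Getpagesize()
}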
workdir = path.Join(id, "work") - //FIXME: We need to figure out to get this to work with additional stores if readWrite { diffDir := path.Join(id, "diff") - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir) + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) } else { - opts = fmt.Sprintf("lowerdir=%s", strings.Join(relLowers, ":")) + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) } if len(optsList) > 0 { opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) } mountData = label.FormatMountLabel(opts, options.MountLabel) - if len(mountData) >= pageSize { - return "", fmt.Errorf("cannot mount layer, mount label %q too large %d >= page size %d", options.MountLabel, len(mountData), pageSize) - } mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { - return mountFrom(d.home, source, target, mType, flags, label) + return mountOverlayFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, "merged") } @@ -1649,7 +1638,7 @@ func (d *Driver) Put(id string) error { if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { + if _, err := os.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { return err } @@ -1658,7 +1647,7 @@ func (d *Driver) Put(id string) error { mappedRoot := filepath.Join(d.home, id, "mapped") // It should not happen, but cleanup any mapped mount if it was leaked. if _, err := os.Stat(mappedRoot); err == nil { - mounts, err := ioutil.ReadDir(mappedRoot) + mounts, err := os.ReadDir(mappedRoot) if err == nil { // Go through all of the mapped mounts. for _, m := range mounts { @@ -1712,6 +1701,40 @@ func (d *Driver) Exists(id string) bool { return err == nil } +func nameLooksLikeID(name string) bool { + if len(name) != 64 { + return false + } + for _, c := range name { + if !unicode.Is(unicode.ASCII_Hex_Digit, c) { + return false + } + } + return true +} + +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + entries, err := os.ReadDir(d.home) + if err != nil { + return nil, err + } + + layers := make([]string, 0) + + for _, entry := range entries { + id := entry.Name() + // Does it look like a datadir directory? 
+ if !entry.IsDir() || !nameLooksLikeID(id) { + continue + } + + layers = append(layers, id) + } + + return layers, err +} + // isParent returns if the passed in parent is the direct parent of the passed in layer func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) @@ -1806,7 +1829,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App if err != nil && !os.IsExist(err) { return graphdriver.DriverWithDifferOutput{}, err } - applyDir, err = ioutil.TempDir(d.getStagingDir(), "") + applyDir, err = os.MkdirTemp(d.getStagingDir(), "") if err != nil { return graphdriver.DriverWithDifferOutput{}, err } @@ -1826,7 +1849,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App GIDMaps: idMappings.GIDs(), IgnoreChownErrors: d.options.ignoreChownErrors, WhiteoutFormat: d.getWhiteoutFormat(), - InUserNS: userns.RunningInUserNS(), + InUserNS: unshare.IsRootless(), }) out.Target = applyDir return out, err @@ -1884,7 +1907,7 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) IgnoreChownErrors: d.options.ignoreChownErrors, ForceMask: d.options.forceMask, WhiteoutFormat: d.getWhiteoutFormat(), - InUserNS: userns.RunningInUserNS(), + InUserNS: unshare.IsRootless(), }); err != nil { return 0, err } @@ -2167,7 +2190,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error { } // tell the additional layer store that we use this layer. // mark this layer as "additional layer" - if err := ioutil.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil { + if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil { return err } notifyUseAdditionalLayer(al.path) @@ -2175,7 +2198,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error { } func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) { - al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer")) + al, err := os.ReadFile(path.Join(d.dir(id), "additionallayer")) if err != nil { return "", err } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go index 0b70a5d92bc..88bfbf9c745 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go index 1cdac777751..2a7a307a294 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go @@ -1,3 +1,4 @@ +//go:build linux && !cgo // +build linux,!cgo package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go index 49af84a229f..33b163a8c2a 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package overlay diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go index 0e6a47fc92c..f5484dee742 100644 --- 
a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -51,8 +51,8 @@ struct fsxattr { */ import "C" import ( + "errors" "fmt" - "io/ioutil" "math" "os" "path" @@ -79,6 +79,7 @@ type Control struct { backingFsBlockDev string nextProjectID uint32 quotas map[string]uint32 + basePath string } // Attempt to generate a unigue projectid. Multiple directories @@ -123,11 +124,9 @@ func generateUniqueProjectID(path string) (uint32, error) { // This is a way to prevent xfs_quota management from conflicting with // containers/storage. -// // Then try to create a test directory with the next project id and set a quota // on it. If that works, continue to scan existing containers to map allocated // project ids. -// func NewControl(basePath string) (*Control, error) { // // Get project id of parent dir as minimal id to be used by driver @@ -161,20 +160,22 @@ func NewControl(basePath string) (*Control, error) { Size: 0, Inodes: 0, } - if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { - return nil, err - } q := Control{ backingFsBlockDev: backingFsBlockDev, nextProjectID: minProjectID + 1, quotas: make(map[string]uint32), + basePath: basePath, + } + + if err := q.setProjectQuota(minProjectID, quota); err != nil { + return nil, err } // // get first project id to be used for next container // - err = q.findNextProjectID(basePath) + err = q.findNextProjectID() if err != nil { return nil, err } @@ -207,11 +208,17 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error { // set the quota limit for the container's project id // logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) - return setProjectQuota(q.backingFsBlockDev, projectID, quota) + return q.setProjectQuota(projectID, quota) +} + +// ClearQuota removes the map entry in the quotas map for targetPath. +// It does so to prevent the map leaking entries as directories are deleted. +func (q *Control) ClearQuota(targetPath string) { + delete(q.quotas, targetPath) } // setProjectQuota - set the quota for project id on xfs block device -func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { +func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { var d C.fs_disk_quota_t d.d_version = C.FS_DQUOT_VERSION d.d_id = C.__u32(projectID) @@ -228,15 +235,35 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er d.d_ino_softlimit = d.d_ino_hardlimit } - var cs = C.CString(backingFsBlockDev) + var cs = C.CString(q.backingFsBlockDev) defer C.free(unsafe.Pointer(cs)) - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, - uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { + runQuotactl := func() syscall.Errno { + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + return errno + } + + errno := runQuotactl() + + // If the backingFsBlockDev does not exist any more then try to recreate it. 
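ClearQuota pairs with the new call in the overlay driver's Remove (earlier in this diff): without it, Control.quotas would keep one stale path entry per deleted layer for the life of the process. A sketch of the pairing, with a hypothetical wrapper name:

package sketch

import (
	"os"

	"github.com/containers/storage/drivers/quota"
)

// removeLayerDir deletes a layer directory and drops its quota bookkeeping.
// It assumes ctl was built by quota.NewControl over the same base path.
func removeLayerDir(ctl *quota.Control, dir string) error {
	if err := os.RemoveAll(dir); err != nil {
		return err
	}
	if ctl != nil {
		ctl.ClearQuota(dir) // forget the projid map entry for this path
	}
	return nil
}

The ENOENT retry that follows handles the other half of the lifecycle: the backing block-device file vanishing out from under an existing Control.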
+ if errors.Is(errno, unix.ENOENT) { + if _, err := makeBackingFsDev(q.basePath); err != nil { + return fmt.Errorf( + "failed to recreate missing backingFsBlockDev %s for projid %d: %w", + q.backingFsBlockDev, projectID, err, + ) + } + + if errno := runQuotactl(); errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w", + projectID, q.backingFsBlockDev, errno) + } + + } else if errno != 0 { return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", - projectID, backingFsBlockDev, errno) + projectID, q.backingFsBlockDev, errno) } return nil @@ -335,16 +362,16 @@ func setProjectID(targetPath string, projectID uint32) error { // findNextProjectID - find the next project id to be used for containers // by scanning driver home directory to find used project ids -func (q *Control) findNextProjectID(home string) error { - files, err := ioutil.ReadDir(home) +func (q *Control) findNextProjectID() error { + files, err := os.ReadDir(q.basePath) if err != nil { - return fmt.Errorf("read directory failed : %s", home) + return fmt.Errorf("read directory failed : %s", q.basePath) } for _, file := range files { if !file.IsDir() { continue } - path := filepath.Join(home, file.Name()) + path := filepath.Join(q.basePath, file.Name()) projid, err := getProjectID(path) if err != nil { return err diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go index 7743dcedbd0..bbb9cb6570d 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_aufs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go @@ -1,3 +1,4 @@ +//go:build !exclude_graphdriver_aufs && linux // +build !exclude_graphdriver_aufs,linux package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go index 40ff1cdd0df..425ebd798ee 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go @@ -1,3 +1,4 @@ +//go:build !exclude_graphdriver_btrfs && linux // +build !exclude_graphdriver_btrfs,linux package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go index cefe2e8c754..a744eaea174 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go +++ b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go @@ -1,3 +1,4 @@ +//go:build !exclude_graphdriver_devicemapper && linux && cgo // +build !exclude_graphdriver_devicemapper,linux,cgo package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go index 30e3b4d7475..95b77b73e24 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_overlay.go +++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go @@ -1,3 +1,4 @@ +//go:build !exclude_graphdriver_overlay && linux && cgo // +build !exclude_graphdriver_overlay,linux,cgo package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go index 4623e7f4648..8e5788a437f 100644 --- 
a/vendor/github.com/containers/storage/drivers/register/register_zfs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -1,3 +1,4 @@ +//go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris // +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris package register diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go index d40d71cfc1e..7b96c082d79 100644 --- a/vendor/github.com/containers/storage/drivers/template.go +++ b/vendor/github.com/containers/storage/drivers/template.go @@ -1,9 +1,8 @@ package graphdriver import ( - "github.com/sirupsen/logrus" - "github.com/containers/storage/pkg/idtools" + "github.com/sirupsen/logrus" ) // TemplateDriver is just barely enough of a driver that we can implement a diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go index 8ac80ee1dba..d94756bdd1c 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package vfs // import "github.com/containers/storage/drivers/vfs" diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index b1073d55fe5..9deaa7c3a86 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -8,6 +8,7 @@ import ( "runtime" "strconv" "strings" + "unicode" graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" @@ -28,7 +29,7 @@ var ( const defaultPerms = os.FileMode(0555) func init() { - graphdriver.Register("vfs", Init) + graphdriver.MustRegister("vfs", Init) } // Init returns a new VFS driver. @@ -98,7 +99,7 @@ func (d *Driver) Status() [][2]string { // Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. func (d *Driver) Metadata(id string) (map[string]string, error) { - return nil, nil + return nil, nil //nolint: nilnil } // Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. @@ -194,7 +195,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool if parent != "" { parentDir, err := d.Get(parent, graphdriver.MountOpts{}) if err != nil { - return fmt.Errorf("%s: %s", parent, err) + return fmt.Errorf("%s: %w", parent, err) } if err := dirCopy(parentDir, dir); err != nil { return err @@ -265,6 +266,40 @@ func (d *Driver) Exists(id string) bool { return err == nil } +func nameLooksLikeID(name string) bool { + if len(name) != 64 { + return false + } + for _, c := range name { + if !unicode.Is(unicode.ASCII_Hex_Digit, c) { + return false + } + } + return true +} + +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + entries, err := os.ReadDir(d.homes[0]) + if err != nil { + return nil, err + } + + layers := make([]string, 0) + + for _, entry := range entries { + id := entry.Name() + // Does it look like a datadir directory? 
+ if !entry.IsDir() || !nameLooksLikeID(id) { + continue + } + + layers = append(layers, id) + } + + return layers, err +} + // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { if len(d.homes) > 1 { diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 7baf6c075a3..66aa460cf83 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -24,7 +23,7 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/backuptar" "github.com/Microsoft/hcsshim" - "github.com/containers/storage/drivers" + graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" @@ -54,7 +53,7 @@ var ( // init registers the windows graph drivers to the register. func init() { - graphdriver.Register("windowsfilter", InitFilter) + graphdriver.MustRegister("windowsfilter", InitFilter) // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes // debugging issues in the re-exec codepath significantly easier. if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { @@ -186,6 +185,11 @@ func (d *Driver) Exists(id string) bool { return result } +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + // CreateFromTemplate creates a layer with the same contents and parent as another layer. func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) @@ -475,7 +479,7 @@ func (d *Driver) Put(id string) error { // We use this opportunity to cleanup any -removing folders which may be // still left if the daemon was killed while it was removing a layer. func (d *Driver) Cleanup() error { - items, err := ioutil.ReadDir(d.info.HomeDir) + items, err := os.ReadDir(d.info.HomeDir) if err != nil { if os.IsNotExist(err) { return nil @@ -870,7 +874,7 @@ func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths .. // resolveID computes the layerID information based on the given id. func (d *Driver) resolveID(id string) (string, error) { - content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) + content, err := os.ReadFile(filepath.Join(d.dir(id), "layerID")) if os.IsNotExist(err) { return id, nil } else if err != nil { @@ -881,13 +885,13 @@ func (d *Driver) resolveID(id string) (string, error) { // setID stores the layerId in disk. func (d *Driver) setID(id, altID string) error { - return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) + return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) } // getLayerChain returns the layer chain information. 
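nameLooksLikeID, duplicated here for vfs and above for overlay, pins down the layer-id convention: exactly 64 ASCII hex digits, i.e. a hex-encoded 256-bit id, which is what lets ListLayers tell layer directories apart from bookkeeping entries such as the "l" link dir. A quick table-driven check of the predicate (written against the behaviour shown, not taken from the repo):

package overlay

import (
	"strings"
	"testing"
)

func TestNameLooksLikeID(t *testing.T) {
	cases := map[string]bool{
		strings.Repeat("a1", 32): true,  // 64 hex digits
		strings.Repeat("a", 63):  false, // too short
		strings.Repeat("g", 64):  false, // right length, not hex
		"l":                      false, // the link dir must be skipped
	}
	for name, want := range cases {
		if got := nameLooksLikeID(name); got != want {
			t.Errorf("nameLooksLikeID(%q) = %v, want %v", name, got, want)
		}
	}
}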
func (d *Driver) getLayerChain(id string) ([]string, error) { jPath := filepath.Join(d.dir(id), "layerchain.json") - content, err := ioutil.ReadFile(jPath) + content, err := os.ReadFile(jPath) if os.IsNotExist(err) { return nil, nil } else if err != nil { @@ -911,7 +915,7 @@ func (d *Driver) setLayerChain(id string, chain []string) error { } jPath := filepath.Join(d.dir(id), "layerchain.json") - err = ioutil.WriteFile(jPath, content, 0600) + err = os.WriteFile(jPath, content, 0600) if err != nil { return fmt.Errorf("unable to write layerchain file - %s", err) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index eedaeed9dd3..aeef6410331 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -18,7 +18,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" - "github.com/mistifyio/go-zfs" + zfs "github.com/mistifyio/go-zfs/v3" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -33,7 +33,7 @@ type zfsOptions struct { const defaultPerms = os.FileMode(0555) func init() { - graphdriver.Register("zfs", Init) + graphdriver.MustRegister("zfs", Init) } // Logger returns a zfs logger implementation. @@ -57,12 +57,12 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites) } - file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600) + file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0600) if err != nil { logger.Debugf("cannot open /dev/zfs: %v", err) return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites) } - defer file.Close() + defer unix.Close(file) options, err := parseOptions(opt.DriverOptions) if err != nil { @@ -506,6 +506,11 @@ func (d *Driver) Exists(id string) bool { return d.filesystemsCache[d.zfsPath(id)] } +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { return nil diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go index 643b169bc5c..738b0ae1be6 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go @@ -1,11 +1,4 @@ +//go:build !linux && !freebsd // +build !linux,!freebsd package zfs - -func checkRootdirFs(rootdir string) error { - return nil -} - -func getMountpoint(id string) string { - return id -} diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go index 9567fe90c62..be9e45cfd08 100644 --- a/vendor/github.com/containers/storage/idset.go +++ b/vendor/github.com/containers/storage/idset.go @@ -1,11 +1,11 @@ package storage import ( - "errors" "fmt" "strings" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/types" "github.com/google/go-intervals/intervalset" ) @@ -116,7 +116,7 @@ func (s *idSet) findAvailable(n int) (*idSet, error) { n -= i.length() } if n > 0 { - return nil, errors.New("could not find enough available IDs") + 
return nil, types.ErrNoAvailableIDs } return &idSet{set: intervalset.NewImmutableSet(intervals)}, nil } diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index e3008ea6ca1..577b6f8ed40 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -1,9 +1,7 @@ package storage import ( - "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -11,6 +9,7 @@ import ( "time" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/stringutils" "github.com/containers/storage/pkg/truncindex" @@ -95,11 +94,17 @@ type Image struct { Flags map[string]interface{} `json:"flags,omitempty"` } -// ROImageStore provides bookkeeping for information about Images. -type ROImageStore interface { - ROFileBasedStore - ROMetadataStore - ROBigDataStore +// roImageStore provides bookkeeping for information about Images. +type roImageStore interface { + roMetadataStore + roBigDataStore + + // startReading makes sure the store is fresh, and locks it for reading. + // If this succeeds, the caller MUST call stopReading(). + startReading() error + + // stopReading releases locks obtained by startReading. + stopReading() // Exists checks if there is an image with the given ID or name. Exists(id string) bool @@ -107,10 +112,6 @@ type ROImageStore interface { // Get retrieves information about an image given an ID or name. Get(id string) (*Image, error) - // Lookup attempts to translate a name to an ID. Most methods do this - // implicitly. - Lookup(name string) (string, error) - // Images returns a slice enumerating the known images. Images() ([]Image, error) @@ -121,51 +122,55 @@ type ROImageStore interface { ByDigest(d digest.Digest) ([]*Image, error) } -// ImageStore provides bookkeeping for information about Images. -type ImageStore interface { - ROImageStore - RWFileBasedStore - RWMetadataStore - RWImageBigDataStore - FlaggableStore +// rwImageStore provides bookkeeping for information about Images. +type rwImageStore interface { + roImageStore + rwMetadataStore + rwImageBigDataStore + flaggableStore + + // startWriting makes sure the store is fresh, and locks it for writing. + // If this succeeds, the caller MUST call stopWriting(). + startWriting() error + + // stopWriting releases locks obtained by startWriting. + stopWriting() // Create creates an image that has a specified ID (or a random one) and // optional names, using the specified layer as its topmost (hopefully // read-only) layer. That layer can be referenced by multiple images. Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) - // SetNames replaces the list of names associated with an image with the - // supplied values. The values are expected to be valid normalized - // named image references. - // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. - SetNames(id string, names []string) error - - // AddNames adds the supplied values to the list of names associated with the image with - // the specified id. The values are expected to be valid normalized - // named image references. - AddNames(id string, names []string) error - - // RemoveNames removes the supplied values from the list of names associated with the image with - // the specified id. 
The values are expected to be valid normalized + // updateNames modifies names associated with an image based on (op, names). + // The values are expected to be valid normalized // named image references. - RemoveNames(id string, names []string) error + updateNames(id string, names []string, op updateNameOperation) error // Delete removes the record of the image. Delete(id string) error + addMappedTopLayer(id, layer string) error + removeMappedTopLayer(id, layer string) error + // Wipe removes records of all images. Wipe() error } type imageStore struct { - lockfile Locker + // The following fields are only set when constructing imageStore, and must never be modified afterwards. + // They are safe to access without any other locking. + lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores. dir string - images []*Image - idindex *truncindex.TruncIndex - byid map[string]*Image - byname map[string]*Image - bydigest map[digest.Digest][]*Image - loadMut sync.Mutex + + inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held. + // The following fields can only be read/written with read/write ownership of inProcessLock, respectively. + // Almost all users should use startReading() or startWriting(). + lastWrite lockfile.LastWrite + images []*Image + idindex *truncindex.TruncIndex + byid map[string]*Image + byname map[string]*Image + bydigest map[digest.Digest][]*Image } func copyImage(i *Image) *Image { @@ -198,6 +203,191 @@ func copyImageSlice(slice []*Image) []*Image { return nil } +// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +// +// This is an internal implementation detail of imageStore construction, every other caller +// should use startReading() instead. +func (r *imageStore) startWritingWithReload(canReload bool) error { + r.lockfile.Lock() + r.inProcessLock.Lock() + succeeded := false + defer func() { + if !succeeded { + r.inProcessLock.Unlock() + r.lockfile.Unlock() + } + }() + + if canReload { + if _, err := r.reloadIfChanged(true); err != nil { + return err + } + } + + succeeded = true + return nil +} + +// startWriting makes sure the store is fresh, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +func (r *imageStore) startWriting() error { + return r.startWritingWithReload(true) +} + +// stopWriting releases locks obtained by startWriting. +func (r *imageStore) stopWriting() { + r.inProcessLock.Unlock() + r.lockfile.Unlock() +} + +// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). +// +// This is an internal implementation detail of imageStore construction, every other caller +// should use startReading() instead. +func (r *imageStore) startReadingWithReload(canReload bool) error { + // inProcessLocked calls the nested function with r.inProcessLock held for writing. + inProcessLocked := func(fn func() error) error { + r.inProcessLock.Lock() + defer r.inProcessLock.Unlock() + return fn() + } + + r.lockfile.RLock() + unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil + defer func() { + if unlockFn != nil { + unlockFn() + } + }() + r.inProcessLock.RLock() + unlockFn = r.stopReading + + if canReload { + // If we are lucky, we can just hold the read locks, check that we are fresh, and continue. 
+ _, modified, err := r.modified() + if err != nil { + return err + } + if modified { + // We are unlucky, and need to reload. + // NOTE: Multiple goroutines can get to this place approximately simultaneously. + r.inProcessLock.RUnlock() + unlockFn = r.lockfile.Unlock + + // r.lastWrite can change at this point if another goroutine reloads the store before us. That’s why we don’t unconditionally + // trigger a load below; we (lock and) reloadIfChanged() again. + + // First try reloading with r.lockfile held for reading. + // r.inProcessLock will serialize all goroutines that got here; + // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data. + var tryLockedForWriting bool + if err := inProcessLocked(func() error { + // We could optimize this further: The r.lockfile.GetLastWrite() value shouldn’t change as long as we hold r.lockfile, + // so if r.lastWrite was already updated, we don’t need to actually read the on-filesystem lock. + var err error + tryLockedForWriting, err = r.reloadIfChanged(false) + return err + }); err != nil { + if !tryLockedForWriting { + return err + } + // Not good enough, we need r.lockfile held for writing. So, let’s do that. + unlockFn() + unlockFn = nil + + r.lockfile.Lock() + unlockFn = r.lockfile.Unlock + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(true) + return err + }); err != nil { + return err + } + unlockFn() + unlockFn = nil + + r.lockfile.RLock() + unlockFn = r.lockfile.Unlock + // We need to check for a reload once more because the on-disk state could have been modified + // after we released the lock. + // If that, _again_, finds inconsistent state, just give up. + // We could, plausibly, retry a few times, but that inconsistent state (duplicate image names) + // shouldn’t be saved (by correct implementations) in the first place. + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(false) + return err + }); err != nil { + return fmt.Errorf("(even after successfully cleaning up once:) %w", err) + } + } + + // NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because + // the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock + // protects us against in-process writers modifying data. + // In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date + // and 2) access to the in-memory data is not racy; + // but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state. + + r.inProcessLock.RLock() + } + } + + unlockFn = nil + return nil +} + +// startReading makes sure the store is fresh, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). +func (r *imageStore) startReading() error { + return r.startReadingWithReload(true) +} + +// stopReading releases locks obtained by startReading. +func (r *imageStore) stopReading() { + r.inProcessLock.RUnlock() + r.lockfile.Unlock() +} + +// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store), +// and a lockfile.LastWrite value for that update. +// +// The caller must hold r.lockfile for reading _or_ writing. +// The caller must hold r.inProcessLock for reading or writing. 
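The scheme being built here layers two locks: the on-disk lock file serializes cooperating processes, while inProcessLock serializes goroutines inside this process, and readers re-check freshness after taking both. Stripped of the write-lock escalation and retry handling, the skeleton looks like this (a sketch over the vendored lockfile package, not the store's real control flow):

package sketch

import (
	"sync"

	"github.com/containers/storage/pkg/lockfile"
)

type store struct {
	fileLock  *lockfile.LockFile // cross-process lock, as in the diff
	mu        sync.RWMutex       // in-process lock; only taken with fileLock held
	lastWrite lockfile.LastWrite
	data      []string // stand-in for the real in-memory state
}

// withReadLock runs fn with both locks held for reading, reloading the
// in-memory state first if some process wrote since we last loaded.
func (s *store) withReadLock(fn func() error) error {
	s.fileLock.RLock()
	defer s.fileLock.Unlock()

	s.mu.Lock() // exclusive within the process while checking/reloading
	lw, modified, err := s.fileLock.ModifiedSince(s.lastWrite)
	if err == nil && modified {
		s.data = s.data[:0] // stand-in for re-reading the JSON from disk
		s.lastWrite = lw
	}
	s.mu.Unlock()
	if err != nil {
		return err
	}

	s.mu.RLock()
	defer s.mu.RUnlock()
	return fn()
}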
+func (r *imageStore) modified() (lockfile.LastWrite, bool, error) { + return r.lockfile.ModifiedSince(r.lastWrite) +} + +// reloadIfChanged reloads the contents of the store from disk if it is changed. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// reloadIfChanged() with lockedForWriting could succeed. +func (r *imageStore) reloadIfChanged(lockedForWriting bool) (bool, error) { + lastWrite, modified, err := r.modified() + if err != nil { + return false, err + } + // We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load() + // and modify no fields, to ensure they see fresh data: + // r.lockfile.Modified() only returns true once per change. Without an exclusive lock, + // one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could + // see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale. + if modified { + if tryLockedForWriting, err := r.load(lockedForWriting); err != nil { + return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again. + } + r.lastWrite = lastWrite + } + return false, nil +} + +// Requires startReading or startWriting. func (r *imageStore) Images() ([]Image, error) { images := make([]Image, len(r.images)) for i := range r.images { @@ -228,6 +418,7 @@ func bigDataNameIsManifest(name string) bool { // recomputeDigests takes a fixed digest and a name-to-digest map and builds a // list of the unique values that would identify the image. +// The caller must hold r.inProcessLock for writing. func (i *Image) recomputeDigests() error { validDigests := make([]digest.Digest, 0, len(i.BigDataDigests)+1) digests := make(map[digest.Digest]struct{}) @@ -242,8 +433,8 @@ func (i *Image) recomputeDigests() error { if !bigDataNameIsManifest(name) { continue } - if digest.Validate() != nil { - return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, digest.Validate()) + if err := digest.Validate(); err != nil { + return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, err) } // Deduplicate the digest values. if _, known := digests[digest]; !known { @@ -258,65 +449,85 @@ func (i *Image) recomputeDigests() error { return nil } -func (r *imageStore) Load() error { - shouldSave := false +// load reloads the contents of the store from disk. +// +// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly +// manage r.lastWrite. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// retrying with lockedForWriting could succeed. 
+func (r *imageStore) load(lockedForWriting bool) (bool, error) { rpath := r.imagespath() - data, err := ioutil.ReadFile(rpath) + data, err := os.ReadFile(rpath) if err != nil && !os.IsNotExist(err) { - return err + return false, err } + images := []*Image{} - idlist := []string{} + if len(data) != 0 { + if err := json.Unmarshal(data, &images); err != nil { + return false, fmt.Errorf("loading %q: %w", rpath, err) + } + } + idlist := make([]string, 0, len(images)) ids := make(map[string]*Image) names := make(map[string]*Image) digests := make(map[digest.Digest][]*Image) - if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { - idlist = make([]string, 0, len(images)) - for n, image := range images { - ids[image.ID] = images[n] - idlist = append(idlist, image.ID) - for _, name := range image.Names { - if conflict, ok := names[name]; ok { - r.removeName(conflict, name) - shouldSave = true - } + var errorToResolveBySaving error // == nil + for n, image := range images { + ids[image.ID] = images[n] + idlist = append(idlist, image.ID) + for _, name := range image.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + errorToResolveBySaving = ErrDuplicateImageNames } - // Compute the digest list. - err = image.recomputeDigests() - if err != nil { - return fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err) - } - for _, name := range image.Names { - names[name] = image - } - for _, digest := range image.Digests { - list := digests[digest] - digests[digest] = append(list, image) - } - image.ReadOnly = !r.IsReadWrite() } + // Compute the digest list. + if err := image.recomputeDigests(); err != nil { + return false, fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err) + } + for _, name := range image.Names { + names[name] = image + } + for _, digest := range image.Digests { + list := digests[digest] + digests[digest] = append(list, image) + } + image.ReadOnly = !r.lockfile.IsReadWrite() } - if shouldSave && (!r.IsReadWrite() || !r.Locked()) { - return ErrDuplicateImageNames + + if errorToResolveBySaving != nil { + if !r.lockfile.IsReadWrite() { + return false, errorToResolveBySaving + } + if !lockedForWriting { + return true, errorToResolveBySaving + } } r.images = images - r.idindex = truncindex.NewTruncIndex(idlist) + r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store. r.byid = ids r.byname = names r.bydigest = digests - if shouldSave { - return r.Save() + if errorToResolveBySaving != nil { + return false, r.Save() } - return nil + return false, nil } +// Save saves the contents of the store to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes). 
func (r *imageStore) Save() error { - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to modify the image store at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } - if !r.Locked() { - return errors.New("image store is not locked for writing") - } + r.lockfile.AssertLockedForWriting() rpath := r.imagespath() if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { return err @@ -325,55 +536,77 @@ func (r *imageStore) Save() error { if err != nil { return err } - defer r.Touch() - return ioutils.AtomicWriteFile(rpath, jdata, 0600) + if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil { + return err + } + lw, err := r.lockfile.RecordWrite() + if err != nil { + return err + } + r.lastWrite = lw + return nil } -func newImageStore(dir string) (ImageStore, error) { +func newImageStore(dir string) (rwImageStore, error) { if err := os.MkdirAll(dir, 0700); err != nil { return nil, err } - lockfile, err := GetLockfile(filepath.Join(dir, "images.lock")) + lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock")) if err != nil { return nil, err } - lockfile.Lock() - defer lockfile.Unlock() istore := imageStore{ lockfile: lockfile, dir: dir, + images: []*Image{}, byid: make(map[string]*Image), byname: make(map[string]*Image), bydigest: make(map[digest.Digest][]*Image), } - if err := istore.Load(); err != nil { + if err := istore.startWritingWithReload(false); err != nil { + return nil, err + } + defer istore.stopWriting() + istore.lastWrite, err = istore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + if _, err := istore.load(true); err != nil { return nil, err } return &istore, nil } -func newROImageStore(dir string) (ROImageStore, error) { - lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock")) +func newROImageStore(dir string) (roImageStore, error) { + lockfile, err := lockfile.GetROLockFile(filepath.Join(dir, "images.lock")) if err != nil { return nil, err } - lockfile.RLock() - defer lockfile.Unlock() istore := imageStore{ lockfile: lockfile, dir: dir, + images: []*Image{}, byid: make(map[string]*Image), byname: make(map[string]*Image), bydigest: make(map[digest.Digest][]*Image), } - if err := istore.Load(); err != nil { + if err := istore.startReadingWithReload(false); err != nil { + return nil, err + } + defer istore.stopReading() + istore.lastWrite, err = istore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + if _, err := istore.load(false); err != nil { return nil, err } return &istore, nil } +// Requires startReading or startWriting. func (r *imageStore) lookup(id string) (*Image, bool) { if image, ok := r.byid[id]; ok { return image, ok @@ -386,8 +619,9 @@ func (r *imageStore) lookup(id string) (*Image, bool) { return nil, false } +// Requires startWriting. func (r *imageStore) ClearFlag(id string, flag string) error { - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to clear flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) @@ -398,8 +632,9 @@ func (r *imageStore) ClearFlag(id string, flag string) error { return r.Save() } +// Requires startWriting. 
func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) @@ -413,8 +648,9 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { return r.Save() } +// Requires startWriting. func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } if id == "" { @@ -456,7 +692,9 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c return nil, fmt.Errorf("validating digests for new image: %w", err) } r.images = append(r.images, image) - r.idindex.Add(id) + // This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // Implementing recovery from an unlikely and unimportant failure here would be too risky. + _ = r.idindex.Add(id) r.byid[id] = image for _, name := range names { r.byname[name] = image @@ -470,6 +708,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c return image, err } +// Requires startWriting. func (r *imageStore) addMappedTopLayer(id, layer string) error { if image, ok := r.lookup(id); ok { image.MappedTopLayers = append(image.MappedTopLayers, layer) @@ -478,6 +717,7 @@ func (r *imageStore) addMappedTopLayer(id, layer string) error { return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } +// Requires startWriting. func (r *imageStore) removeMappedTopLayer(id, layer string) error { if image, ok := r.lookup(id); ok { initialLen := len(image.MappedTopLayers) @@ -491,6 +731,7 @@ func (r *imageStore) removeMappedTopLayer(id, layer string) error { return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } +// Requires startReading or startWriting. func (r *imageStore) Metadata(id string) (string, error) { if image, ok := r.lookup(id); ok { return image.Metadata, nil @@ -498,8 +739,9 @@ func (r *imageStore) Metadata(id string) (string, error) { return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } +// Requires startWriting. func (r *imageStore) SetMetadata(id, metadata string) error { - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to modify image metadata at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } if image, ok := r.lookup(id); ok { @@ -509,29 +751,19 @@ func (r *imageStore) SetMetadata(id, metadata string) error { return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } +// The caller must hold r.inProcessLock for writing. func (r *imageStore) removeName(image *Image, name string) { image.Names = stringSliceWithoutValue(image.Names, name) } +// The caller must hold r.inProcessLock for writing. func (i *Image) addNameToHistory(name string) { i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...)) } -// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
-func (r *imageStore) SetNames(id string, names []string) error { - return r.updateNames(id, names, setNames) -} - -func (r *imageStore) AddNames(id string, names []string) error { - return r.updateNames(id, names, addNames) -} - -func (r *imageStore) RemoveNames(id string, names []string) error { - return r.updateNames(id, names, removeNames) -} - +// Requires startWriting. func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error { - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to change image name assignments at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) @@ -557,8 +789,9 @@ func (r *imageStore) updateNames(id string, names []string, op updateNameOperati return r.Save() } +// Requires startWriting. func (r *imageStore) Delete(id string) error { - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) @@ -573,7 +806,9 @@ func (r *imageStore) Delete(id string) error { } } delete(r.byid, id) - r.idindex.Delete(id) + // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. + _ = r.idindex.Delete(id) for _, name := range image.Names { delete(r.byname, name) } @@ -602,6 +837,7 @@ func (r *imageStore) Delete(id string) error { return nil } +// Requires startReading or startWriting. func (r *imageStore) Get(id string) (*Image, error) { if image, ok := r.lookup(id); ok { return copyImage(image), nil @@ -609,18 +845,13 @@ func (r *imageStore) Get(id string) (*Image, error) { return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } -func (r *imageStore) Lookup(name string) (id string, err error) { - if image, ok := r.lookup(name); ok { - return image.ID, nil - } - return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) -} - +// Requires startReading or startWriting. func (r *imageStore) Exists(id string) bool { _, ok := r.lookup(id) return ok } +// Requires startReading or startWriting. func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { if images, ok := r.bydigest[d]; ok { return copyImageSlice(images), nil @@ -628,6 +859,7 @@ func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { return nil, fmt.Errorf("locating image with digest %q: %w", d, ErrImageUnknown) } +// Requires startReading or startWriting. func (r *imageStore) BigData(id, key string) ([]byte, error) { if key == "" { return nil, fmt.Errorf("can't retrieve image big data value for empty name: %w", ErrInvalidBigDataName) @@ -636,9 +868,10 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) { if !ok { return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } - return ioutil.ReadFile(r.datapath(image.ID, key)) + return os.ReadFile(r.datapath(image.ID, key)) } +// Requires startReading or startWriting. 
func (r *imageStore) BigDataSize(id, key string) (int64, error) { if key == "" { return -1, fmt.Errorf("can't retrieve size of image big data with empty name: %w", ErrInvalidBigDataName) @@ -647,10 +880,7 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) { if !ok { return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } - if image.BigDataSizes == nil { - image.BigDataSizes = make(map[string]int64) - } - if size, ok := image.BigDataSizes[key]; ok { + if size, ok := image.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil. return size, nil } if data, err := r.BigData(id, key); err == nil && data != nil { @@ -659,6 +889,7 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) { return -1, ErrSizeUnknown } +// Requires startReading or startWriting. func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { if key == "" { return "", fmt.Errorf("can't retrieve digest of image big data value with empty name: %w", ErrInvalidBigDataName) @@ -667,15 +898,13 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { if !ok { return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } - if image.BigDataDigests == nil { - image.BigDataDigests = make(map[string]digest.Digest) - } - if d, ok := image.BigDataDigests[key]; ok { + if d, ok := image.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil. return d, nil } return "", ErrDigestUnknown } +// Requires startReading or startWriting. func (r *imageStore) BigDataNames(id string) ([]string, error) { image, ok := r.lookup(id) if !ok { @@ -695,11 +924,12 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { return modified } +// Requires startWriting. func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { if key == "" { return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName) } - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) @@ -779,8 +1009,9 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func return err } +// Requires startWriting. 
func (r *imageStore) Wipe() error { - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } ids := make([]string, 0, len(r.byid)) @@ -794,50 +1025,3 @@ func (r *imageStore) Wipe() error { } return nil } - -func (r *imageStore) Lock() { - r.lockfile.Lock() -} - -func (r *imageStore) RecursiveLock() { - r.lockfile.RecursiveLock() -} - -func (r *imageStore) RLock() { - r.lockfile.RLock() -} - -func (r *imageStore) Unlock() { - r.lockfile.Unlock() -} - -func (r *imageStore) Touch() error { - return r.lockfile.Touch() -} - -func (r *imageStore) Modified() (bool, error) { - return r.lockfile.Modified() -} - -func (r *imageStore) IsReadWrite() bool { - return r.lockfile.IsReadWrite() -} - -func (r *imageStore) TouchedSince(when time.Time) bool { - return r.lockfile.TouchedSince(when) -} - -func (r *imageStore) Locked() bool { - return r.lockfile.Locked() -} - -func (r *imageStore) ReloadIfChanged() error { - r.loadMut.Lock() - defer r.loadMut.Unlock() - - modified, err := r.Modified() - if err == nil && modified { - return r.Load() - } - return err -} diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index d24625a22fd..f14108be5aa 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -19,6 +18,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/system" @@ -27,7 +27,7 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/selinux/go-selinux/label" + "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/archive/tar" "github.com/vbatts/tar-split/tar/asm" @@ -37,8 +37,28 @@ import ( const ( tarSplitSuffix = ".tar-split.gz" incompleteFlag = "incomplete" + // maxLayerStoreCleanupIterations is the number of times we try to clean up inconsistent layer store state + // in readers (which, for implementation reasons, gives other writers the opportunity to create more inconsistent state) + // until we just give up. + maxLayerStoreCleanupIterations = 3 ) +type layerLocations uint8 + +// The backing store is split in two json files, one (the volatile) +// that is written without fsync() meaning it isn't as robust to +// unclean shutdown +const ( + stableLayerLocation layerLocations = 1 << iota + volatileLayerLocation + + numLayerLocationIndex = iota +) + +func layerLocationFromIndex(index int) layerLocations { + return 1 << index +} + // A Layer is a record of a copy-on-write layer that's stored by the lower // level graph driver. type Layer struct { @@ -63,13 +83,26 @@ type Layer struct { MountLabel string `json:"mountlabel,omitempty"` // MountPoint is the path where the layer is mounted, or where it was most - // recently mounted. This can change between subsequent Unmount() and - // Mount() calls, so the caller should consult this value after Mount() - // succeeds to find the location of the container's root filesystem. + // recently mounted. 
+	//
+	// WARNING: This field is a snapshot in time: (except for users inside c/storage that
+	// hold the mount lock) the true value can change between subsequent
+	// calls to c/storage API.
+	//
+	// Users that need to handle concurrent mount/unmount attempts should not access this
+	// field at all, and should only use the path returned by .Mount() (and that’s only
+	// assuming no other user will concurrently decide to unmount that mount point).
 	MountPoint string `json:"-"`
 
 	// MountCount is used as a reference count for the container's layer being
 	// mounted at the mount point.
+	//
+	// WARNING: This field is a snapshot in time; (except for users inside c/storage that
+	// hold the mount lock) the true value can change between subsequent
+	// calls to c/storage API.
+	//
+	// In situations where concurrent mount/unmount attempts can happen, this field
+	// should not be used for any decisions, maybe apart from heuristic user warnings.
 	MountCount int `json:"-"`
 
 	// Created is the datestamp for when this layer was created. Older
@@ -120,6 +153,9 @@ type Layer struct {
 	// ReadOnly is true if this layer resides in a read-only layer store.
 	ReadOnly bool `json:"-"`
 
+	// volatileStore is true if the layer is from the volatile json file
+	volatileStore bool `json:"-"`
+
 	// BigDataNames is a list of names of data items that we keep for the
 	// convenience of the caller. They can be large, and are only in
 	// memory when being read from or written to disk.
@@ -138,13 +174,19 @@ type DiffOptions struct {
 	Compression *archive.Compression
 }
 
-// ROLayerStore wraps a graph driver, adding the ability to refer to layers by
+// roLayerStore wraps a graph driver, adding the ability to refer to layers by
 // name, and keeping track of parent-child relationships, along with a list of
 // all known layers.
-type ROLayerStore interface {
-	ROFileBasedStore
-	ROMetadataStore
-	ROLayerBigDataStore
+type roLayerStore interface {
+	roMetadataStore
+	roLayerBigDataStore
+
+	// startReading makes sure the store is fresh, and locks it for reading.
+	// If this succeeds, the caller MUST call stopReading().
+	startReading() error
+
+	// stopReading releases locks obtained by startReading.
+	stopReading()
 
 	// Exists checks if a layer with the specified name or ID is known.
 	Exists(id string) bool
@@ -178,10 +220,6 @@ type ROLayerStore interface {
 	// found, it returns an error.
 	Size(name string) (int64, error)
 
-	// Lookup attempts to translate a name to an ID. Most methods do this
-	// implicitly.
-	Lookup(name string) (string, error)
-
 	// LayersByCompressedDigest returns a slice of the layers with the
 	// specified compressed digest value recorded for them.
 	LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
@@ -194,15 +232,21 @@ type ROLayerStore interface {
 	Layers() ([]Layer, error)
 }
 
-// LayerStore wraps a graph driver, adding the ability to refer to layers by
+// rwLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
 // all known layers.
-type LayerStore interface {
-	ROLayerStore
-	RWFileBasedStore
-	RWMetadataStore
-	FlaggableStore
-	RWLayerBigDataStore
+type rwLayerStore interface {
+	roLayerStore
+	rwMetadataStore
+	flaggableStore
+	rwLayerBigDataStore
+
+	// startWriting makes sure the store is fresh, and locks it for writing.
+	// If this succeeds, the caller MUST call stopWriting().
+	startWriting() error
+
+	// stopWriting releases locks obtained by startWriting.
+	stopWriting()
 
 	// Create creates a new layer, optionally giving it a specified ID rather than
 	// a randomly-generated one, either inheriting data from another specified
@@ -219,18 +263,8 @@ type LayerStore interface {
 	// Put combines the functions of CreateWithFlags and ApplyDiff.
 	Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
 
-	// SetNames replaces the list of names associated with a layer with the
-	// supplied values.
-	// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
-	SetNames(id string, names []string) error
-
-	// AddNames adds the supplied values to the list of names associated with the layer with the
-	// specified id.
-	AddNames(id string, names []string) error
-
-	// RemoveNames remove the supplied values from the list of names associated with the layer with the
-	// specified id.
-	RemoveNames(id string, names []string) error
+	// updateNames modifies names associated with a layer based on (op, names).
+	updateNames(id string, names []string, op updateNameOperation) error
 
 	// Delete deletes a layer with the specified name or ID.
 	Delete(id string) error
@@ -244,8 +278,15 @@ type LayerStore interface {
 	// The mappings used by the container can be specified.
 	Mount(id string, options drivers.MountOpts) (string, error)
 
-	// Unmount unmounts a layer when it is no longer in use.
-	Unmount(id string, force bool) (bool, error)
+	// unmount unmounts a layer when it is no longer in use.
+	// If conditional is set, it will fail with ErrLayerNotMounted if the layer is not mounted (without conditional, the caller is
+	// making a promise that the layer is actually mounted).
+	// If force is set, it will physically try to unmount it even if it is mounted multiple times, or even if (!conditional and)
+	// there are no records of it being mounted in the first place.
+	// It returns whether the layer was still mounted at the time this function returned.
+	// WARNING: The return value may already be obsolete by the time it is available
+	// to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
+	unmount(id string, force bool, conditional bool) (bool, error)
 
 	// Mounted returns number of times the layer has been mounted.
 	Mounted(id string) (int, error)
@@ -271,33 +312,52 @@ type LayerStore interface {
 	// DifferTarget gets the location where files are stored for the layer.
 	DifferTarget(id string) (string, error)
 
-	// LoadLocked wraps Load in a locked state. This means it loads the store
-	// and cleans-up invalid layers if needed.
-	LoadLocked() error
-
 	// PutAdditionalLayer creates a layer using the diff contained in the additional layer
 	// store.
 	// This API is experimental and can be changed without bumping the major version number.
 	PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
+
+	// Clean up unreferenced layers
+	GarbageCollect() error
+
+	// supportsShifting() returns true if the driver.Driver.SupportsShifting().
+ supportsShifting() bool } type layerStore struct { - lockfile Locker - mountsLockfile Locker - rundir string - driver drivers.Driver - layerdir string - layers []*Layer - idindex *truncindex.TruncIndex - byid map[string]*Layer - byname map[string]*Layer - bymount map[string]*Layer - bycompressedsum map[digest.Digest][]string - byuncompressedsum map[digest.Digest][]string - uidMap []idtools.IDMap - gidMap []idtools.IDMap - loadMut sync.Mutex - layerspathModified time.Time + // The following fields are only set when constructing layerStore, and must never be modified afterwards. + // They are safe to access without any other locking. + lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores. + mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held. + rundir string + jsonPath [numLayerLocationIndex]string + layerdir string + + inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held. + // The following fields can only be read/written with read/write ownership of inProcessLock, respectively. + // Almost all users should use startReading() or startWriting(). + lastWrite lockfile.LastWrite + mountsLastWrite lockfile.LastWrite // Only valid if lockfile.IsReadWrite() + layers []*Layer + idindex *truncindex.TruncIndex + byid map[string]*Layer + byname map[string]*Layer + bymount map[string]*Layer + bycompressedsum map[digest.Digest][]string + byuncompressedsum map[digest.Digest][]string + layerspathsModified [numLayerLocationIndex]time.Time + + // FIXME: This field is only set when constructing layerStore, but locking rules of the driver + // interface itself are not documented here. + driver drivers.Driver +} + +// The caller must hold r.inProcessLock for reading. +func layerLocation(l *Layer) layerLocations { + if l.volatileStore { + return volatileLayerLocation + } + return stableLayerLocation } func copyLayer(l *Layer) *Layer { @@ -316,6 +376,7 @@ func copyLayer(l *Layer) *Layer { UncompressedSize: l.UncompressedSize, CompressionType: l.CompressionType, ReadOnly: l.ReadOnly, + volatileStore: l.volatileStore, BigDataNames: copyStringSlice(l.BigDataNames), Flags: copyStringInterfaceMap(l.Flags), UIDMap: copyIDMap(l.UIDMap), @@ -325,6 +386,271 @@ func copyLayer(l *Layer) *Layer { } } +// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +// +// This is an internal implementation detail of layerStore construction, every other caller +// should use startWriting() instead. +func (r *layerStore) startWritingWithReload(canReload bool) error { + r.lockfile.Lock() + r.inProcessLock.Lock() + succeeded := false + defer func() { + if !succeeded { + r.inProcessLock.Unlock() + r.lockfile.Unlock() + } + }() + + if canReload { + if _, err := r.reloadIfChanged(true); err != nil { + return err + } + } + + succeeded = true + return nil +} + +// startWriting makes sure the store is fresh, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +func (r *layerStore) startWriting() error { + return r.startWritingWithReload(true) +} + +// stopWriting releases locks obtained by startWriting. +func (r *layerStore) stopWriting() { + r.inProcessLock.Unlock() + r.lockfile.Unlock() +} + +// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). 
+//
+// This is an internal implementation detail of layerStore construction, every other caller
+// should use startReading() instead.
+func (r *layerStore) startReadingWithReload(canReload bool) error {
+	// inProcessLocked calls the nested function with r.inProcessLock held for writing.
+	inProcessLocked := func(fn func() error) error {
+		r.inProcessLock.Lock()
+		defer r.inProcessLock.Unlock()
+		return fn()
+	}
+
+	r.lockfile.RLock()
+	unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
+	defer func() {
+		if unlockFn != nil {
+			unlockFn()
+		}
+	}()
+	r.inProcessLock.RLock()
+	unlockFn = r.stopReading
+
+	if canReload {
+		// If we are lucky, we can just hold the read locks, check that we are fresh, and continue.
+		modified, err := r.modified()
+		if err != nil {
+			return err
+		}
+		if modified {
+			// We are unlucky, and need to reload.
+			// NOTE: Multiple goroutines can get to this place approximately simultaneously.
+			r.inProcessLock.RUnlock()
+			unlockFn = r.lockfile.Unlock
+
+			cleanupsDone := 0
+			for {
+				// First try reloading with r.lockfile held for reading.
+				// r.inProcessLock will serialize all goroutines that got here;
+				// each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data.
+				var tryLockedForWriting bool
+				err := inProcessLocked(func() error {
+					var err error
+					tryLockedForWriting, err = r.reloadIfChanged(false)
+					return err
+				})
+				if err == nil {
+					break
+				}
+				if !tryLockedForWriting {
+					return err
+				}
+				if cleanupsDone >= maxLayerStoreCleanupIterations {
+					return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err)
+				}
+				// Not good enough, we need r.lockfile held for writing. So, let’s do that.
+				unlockFn()
+				unlockFn = nil
+
+				r.lockfile.Lock()
+				unlockFn = r.lockfile.Unlock
+				if err := inProcessLocked(func() error {
+					_, err := r.reloadIfChanged(true)
+					return err
+				}); err != nil {
+					return err
+				}
+				unlockFn()
+				unlockFn = nil
+
+				r.lockfile.RLock()
+				unlockFn = r.lockfile.Unlock
+				// We need to check for a reload again because the on-disk state could have been modified
+				// after we released the lock.
+				cleanupsDone++
+			}
+
+			// NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because
+			// the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock
+			// protects us against in-process writers modifying data.
+			// In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
+			// and 2) access to the in-memory data is not racy;
+			// but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.
+
+			r.inProcessLock.RLock()
+		}
+	}
+
+	unlockFn = nil
+	return nil
+}
+
+// startReading makes sure the store is fresh, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+func (r *layerStore) startReading() error {
+	return r.startReadingWithReload(true)
+}
+
+// stopReading releases locks obtained by startReading.
+func (r *layerStore) stopReading() {
+	r.inProcessLock.RUnlock()
+	r.lockfile.Unlock()
+}
+
+// modified returns true if the on-disk state (of layers or mounts) has changed (i.e. if reloadIfChanged may need to modify the store).
+//
+// Note that unlike containerStore.modified and imageStore.modified, this function is not directly used in layerStore.reloadIfChanged();
+// it exists only to help the reader ensure it has fresh enough state.
+//
+// The caller must hold r.lockfile for reading _or_ writing.
+// The caller must hold r.inProcessLock for reading or writing.
+func (r *layerStore) modified() (bool, error) {
+	_, m, err := r.layersModified()
+	if err != nil {
+		return false, err
+	}
+	if m {
+		return true, nil
+	}
+	if r.lockfile.IsReadWrite() {
+		// This means we get, release, and re-obtain, r.mountsLockfile if we actually need to do any kind of reload.
+		// That’s a bit expensive, but hopefully most callers will be read-only and see no changes.
+		// We can’t eliminate these mountsLockfile accesses given the current assumption that Layer objects have _some_ not-very-obsolete
+		// mount data. Maybe we can segregate the mount-dependent and mount-independent operations better...
+		r.mountsLockfile.RLock()
+		defer r.mountsLockfile.Unlock()
+		_, m, err := r.mountsModified()
+		if err != nil {
+			return false, err
+		}
+		if m {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// layersModified() checks if the most recent writer to r.jsonPath[] was a party other than the
+// last recorded writer. If so, it returns a lockfile.LastWrite value to record on a successful
+// reload.
+// It should only be called with r.lockfile held.
+// The caller must hold r.inProcessLock for reading.
+func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) {
+	lastWrite, modified, err := r.lockfile.ModifiedSince(r.lastWrite)
+	if err != nil {
+		return lockfile.LastWrite{}, modified, err
+	}
+	if modified {
+		return lastWrite, true, nil
+	}
+
+	// If the layers.json file or volatile-layers.json has been
+	// modified manually, then we have to reload the storage in
+	// any case.
+	for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
+		info, err := os.Stat(r.jsonPath[locationIndex])
+		if err != nil && !os.IsNotExist(err) {
+			return lockfile.LastWrite{}, false, fmt.Errorf("stat layers file: %w", err)
+		}
+		if info != nil && info.ModTime() != r.layerspathsModified[locationIndex] {
+			// In this case the LastWrite value is equal to r.lastWrite; writing it back doesn’t hurt.
+			return lastWrite, true, nil
+		}
+	}
+
+	return lockfile.LastWrite{}, false, nil
+}
+
+// reloadIfChanged reloads the contents of the store from disk if it is changed.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+//
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// reloadIfChanged() with lockedForWriting could succeed.
+func (r *layerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
+	lastWrite, layersModified, err := r.layersModified()
+	if err != nil {
+		return false, err
+	}
+	if layersModified {
+		// r.load also reloads mounts data; so, on this path, we don’t need to call reloadMountsIfChanged.
+		if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
+			return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
+		}
+		r.lastWrite = lastWrite
+		return false, nil
+	}
+	if r.lockfile.IsReadWrite() {
+		r.mountsLockfile.RLock()
+		defer r.mountsLockfile.Unlock()
+		if err := r.reloadMountsIfChanged(); err != nil {
+			return false, err
+		}
+	}
+	return false, nil
+}
+
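The check/escalate/re-check pattern implemented by startReading() and reloadIfChanged() above can be hard to follow in diff form. The following minimal, self-contained sketch shows the same idea in isolation; the toyStore type and its integer write counters are hypothetical stand-ins for the real store and for lockfile.LastWrite, not c/storage API.

```go
package main

import (
	"fmt"
	"sync"
)

// toyStore is a hypothetical stand-in for a store guarded by an in-process lock;
// diskWrite plays the role of the on-disk lockfile state, lastWrite the last state loaded.
type toyStore struct {
	inProcessLock sync.RWMutex
	lastWrite     int    // last write we have already loaded (cf. lockfile.LastWrite)
	diskWrite     int    // current on-disk write counter (cf. lockfile.ModifiedSince)
	data          string // the in-memory copy of the on-disk state
}

// reloadIfChanged must run with inProcessLock held for writing, so that only
// one goroutine observes "modified" and actually reloads.
func (s *toyStore) reloadIfChanged() {
	if s.diskWrite != s.lastWrite {
		s.data = fmt.Sprintf("state-%d", s.diskWrite) // pretend to re-read the JSON files
		s.lastWrite = s.diskWrite
	}
}

// startReading mirrors the fast-path/slow-path split above: stay on the shared
// lock when the store is fresh, and escalate to the exclusive lock only to reload.
func (s *toyStore) startReading() {
	s.inProcessLock.RLock()
	if s.diskWrite == s.lastWrite {
		return // fresh; the caller must eventually call stopReading
	}
	// Unlucky: drop the read lock, reload under the write lock, then downgrade.
	// As in the real code, several goroutines may reach this point; the first one
	// reloads and the rest find the store fresh inside reloadIfChanged.
	s.inProcessLock.RUnlock()
	s.inProcessLock.Lock()
	s.reloadIfChanged()
	s.inProcessLock.Unlock()
	s.inProcessLock.RLock()
}

func (s *toyStore) stopReading() { s.inProcessLock.RUnlock() }

func main() {
	s := &toyStore{diskWrite: 1} // simulate a write by another process
	s.startReading()
	fmt.Println(s.data) // state-1
	s.stopReading()
}
```

As in the real code, the brief window between the exclusive unlock and re-taking the read lock is why the callers above re-check for modifications rather than assuming freshness.

+// mountsModified returns true if the on-disk mount state has changed (i.e. if reloadMountsIfChanged may need to modify the store),
+// and a lockfile.LastWrite value for that update.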
+//
+// The caller must hold r.mountsLockfile for reading _or_ writing.
+// The caller must hold r.inProcessLock for reading or writing.
+func (r *layerStore) mountsModified() (lockfile.LastWrite, bool, error) {
+	return r.mountsLockfile.ModifiedSince(r.mountsLastWrite)
+}
+
+// reloadMountsIfChanged reloads the contents of mountsPath from disk if it is changed.
+//
+// The caller must hold r.mountsLockfile for reading or writing.
+func (r *layerStore) reloadMountsIfChanged() error {
+	lastWrite, modified, err := r.mountsModified()
+	if err != nil {
+		return err
+	}
+	if modified {
+		if err = r.loadMounts(); err != nil {
+			return err
+		}
+		r.mountsLastWrite = lastWrite
+	}
+	return nil
+}
+
+// Requires startReading or startWriting.
 func (r *layerStore) Layers() ([]Layer, error) {
 	layers := make([]Layer, len(r.layers))
 	for i := range r.layers {
@@ -333,109 +659,209 @@ func (r *layerStore) Layers() ([]Layer, error) {
 	return layers, nil
 }
 
+// Requires startWriting.
+func (r *layerStore) GarbageCollect() error {
+	layers, err := r.driver.ListLayers()
+
+	if err != nil {
+		if errors.Is(err, drivers.ErrNotSupported) {
+			return nil
+		}
+		return err
+	}
+
+	for _, id := range layers {
+		// Is the id still referenced
+		if r.byid[id] != nil {
+			continue
+		}
+
+		// Remove layer and any related data of unreferenced id
+		if err := r.driver.Remove(id); err != nil {
+			return err
+		}
+
+		os.Remove(r.tspath(id))
+		os.RemoveAll(r.datadir(id))
+	}
+	return nil
+}
+
 func (r *layerStore) mountspath() string {
 	return filepath.Join(r.rundir, "mountpoints.json")
 }
 
-func (r *layerStore) layerspath() string {
-	return filepath.Join(r.layerdir, "layers.json")
-}
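Because so much of the code below keys off the split between the stable and volatile JSON files, it may help to see the location flags in isolation. The sketch below merely re-declares the constants introduced earlier in this patch so that it compiles standalone; the main() loop is illustrative, not c/storage code.

```go
package main

import "fmt"

type layerLocations uint8

const (
	stableLayerLocation   layerLocations = 1 << iota // layers.json, written with fsync
	volatileLayerLocation                            // volatile-layers.json, written without fsync

	numLayerLocationIndex = iota
)

func layerLocationFromIndex(index int) layerLocations {
	return 1 << index
}

func main() {
	// A save request is a bit mask; saveLayers-style code walks the indices and
	// rewrites only the files whose bit is set.
	toSave := stableLayerLocation | volatileLayerLocation
	for i := 0; i < numLayerLocationIndex; i++ {
		loc := layerLocationFromIndex(i)
		if toSave&loc != 0 {
			fmt.Printf("would rewrite location %d (volatile=%v)\n", i, loc == volatileLayerLocation)
		}
	}
}
```

+// load reloads the contents of the store from disk.
+//
+// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
+// manage r.lastWrite.
+//
+// As a side effect, this sets r.mountsLastWrite.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.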
+func (r *layerStore) load(lockedForWriting bool) (bool, error) {
+	var modifiedLocations layerLocations
 
-func (r *layerStore) Load() error {
-	shouldSave := false
-	rpath := r.layerspath()
-	data, err := ioutil.ReadFile(rpath)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
 	layers := []*Layer{}
-	idlist := []string{}
 	ids := make(map[string]*Layer)
-	names := make(map[string]*Layer)
-	compressedsums := make(map[digest.Digest][]string)
-	uncompressedsums := make(map[digest.Digest][]string)
-	if r.IsReadWrite() {
-		label.ClearLabels()
-	}
-	if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
-		idlist = make([]string, 0, len(layers))
-		for n, layer := range layers {
-			ids[layer.ID] = layers[n]
-			idlist = append(idlist, layer.ID)
-			for _, name := range layer.Names {
-				if conflict, ok := names[name]; ok {
-					r.removeName(conflict, name)
-					shouldSave = true
-				}
-				names[name] = layers[n]
+
+	for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
+		location := layerLocationFromIndex(locationIndex)
+		rpath := r.jsonPath[locationIndex]
+		info, err := os.Stat(rpath)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return false, err
			}
-			if layer.CompressedDigest != "" {
-				compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
+		} else {
+			r.layerspathsModified[locationIndex] = info.ModTime()
+		}
+		data, err := os.ReadFile(rpath)
+		if err != nil && !os.IsNotExist(err) {
+			return false, err
+		}
+
+		locationLayers := []*Layer{}
+		if len(data) != 0 {
+			if err := json.Unmarshal(data, &locationLayers); err != nil {
+				return false, fmt.Errorf("loading %q: %w", rpath, err)
			}
-			if layer.UncompressedDigest != "" {
-				uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
+		}
+
+		for _, layer := range locationLayers {
+			// There should be no duplicated ids between json files, but let’s check to be sure
+			if ids[layer.ID] != nil {
+				continue // skip invalid duplicated layer
			}
-			if layer.MountLabel != "" {
-				label.ReserveLabel(layer.MountLabel)
+			// Remember where the layer came from
+			if location == volatileLayerLocation {
+				layer.volatileStore = true
			}
-			layer.ReadOnly = !r.IsReadWrite()
+			layers = append(layers, layer)
+			ids[layer.ID] = layer
+		}
+	}
+
+	idlist := make([]string, 0, len(layers))
+	names := make(map[string]*Layer)
+	compressedsums := make(map[digest.Digest][]string)
+	uncompressedsums := make(map[digest.Digest][]string)
+	var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them.
+	if r.lockfile.IsReadWrite() {
+		selinux.ClearLabels()
+	}
+	for n, layer := range layers {
+		idlist = append(idlist, layer.ID)
+		for _, name := range layer.Names {
+			if conflict, ok := names[name]; ok {
+				r.removeName(conflict, name)
+				errorToResolveBySaving = ErrDuplicateLayerNames
+				modifiedLocations |= layerLocation(conflict)
+			}
+			names[name] = layers[n]
+		}
+		if layer.CompressedDigest != "" {
+			compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
+		}
+		if layer.UncompressedDigest != "" {
+			uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
+		}
+		if layer.MountLabel != "" {
+			selinux.ReserveLabel(layer.MountLabel)
+		}
+		layer.ReadOnly = !r.lockfile.IsReadWrite()
+		// The r.lockfile.IsReadWrite() condition maintains past practice:
+		// Incomplete layers in a read-only store are not treated as a reason to refuse to use other layers from that store
+		// (OTOH creating child layers on top would probably lead to problems?).
+		// We do remove incomplete layers in read-write stores so that we don’t build on top of them.
+		if layerHasIncompleteFlag(layer) && r.lockfile.IsReadWrite() {
+			errorToResolveBySaving = errors.New("an incomplete layer exists and can't be cleaned up")
		}
-		err = nil
 	}
-	if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
-		return ErrDuplicateLayerNames
+
+	if errorToResolveBySaving != nil {
+		if !r.lockfile.IsReadWrite() {
+			return false, errorToResolveBySaving
+		}
+		if !lockedForWriting {
+			return true, errorToResolveBySaving
+		}
 	}
 	r.layers = layers
-	r.idindex = truncindex.NewTruncIndex(idlist)
+	r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
 	r.byid = ids
 	r.byname = names
 	r.bycompressedsum = compressedsums
 	r.byuncompressedsum = uncompressedsums
 
 	// Load and merge information about which layers are mounted, and where.
-	if r.IsReadWrite() {
+	if r.lockfile.IsReadWrite() {
 		r.mountsLockfile.RLock()
 		defer r.mountsLockfile.Unlock()
-		if err = r.loadMounts(); err != nil {
-			return err
+		// We need to reload mounts unconditionally, because by creating r.layers from scratch, we have discarded the previous
+		// information, if any. So, obtain a fresh mountsLastWrite value so that we don’t unnecessarily reload the data
+		// afterwards.
+		mountsLastWrite, err := r.mountsLockfile.GetLastWrite()
+		if err != nil {
+			return false, err
 		}
+		if err := r.loadMounts(); err != nil {
+			return false, err
+		}
+		r.mountsLastWrite = mountsLastWrite
+		// NOTE: We will release mountsLockfile when this function returns, so unlike most of the layer data, the
+		// r.layers[].MountPoint, r.layers[].MountCount, and r.bymount values might not reflect
+		// true on-filesystem state already by the time this function returns.
+		// Code that needs the state to be accurate must lock r.mountsLockfile again,
+		// and possibly loadMounts() again.
+	}
 
-		// Last step: as we’re writable, try to remove anything that a previous
+	if errorToResolveBySaving != nil {
+		if !r.lockfile.IsReadWrite() {
+			return false, fmt.Errorf("internal error: layerStore.load has errorToResolveBySaving but !r.lockfile.IsReadWrite")
+		}
+		// Last step: try to remove anything that a previous
 		// user of this storage area marked for deletion but didn't manage to
 		// actually delete.
-		if r.Locked() {
-			for _, layer := range r.layers {
-				if layer.Flags == nil {
-					layer.Flags = make(map[string]interface{})
-				}
-				if layerHasIncompleteFlag(layer) {
-					logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
-					err = r.deleteInternal(layer.ID)
-					if err != nil {
-						break
-					}
-					shouldSave = true
+		var incompleteDeletionErrors error // = nil
+		for _, layer := range r.layers {
+			if layer.Flags == nil {
+				layer.Flags = make(map[string]interface{})
+			}
+			if layerHasIncompleteFlag(layer) {
+				logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
+				err := r.deleteInternal(layer.ID)
+				if err != nil {
+					// Don't return the error immediately, because deleteInternal does not saveLayers();
+					// Even if deleting one incomplete layer fails, call saveLayers() so that other possibly successfully
+					// deleted incomplete layers have their metadata correctly removed.
+					incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
+						fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
				}
+				modifiedLocations |= layerLocation(layer)
			}
		}
-		if shouldSave {
-			return r.saveLayers()
+		if err := r.saveLayers(modifiedLocations); err != nil {
+			return false, err
+		}
+		if incompleteDeletionErrors != nil {
+			return false, incompleteDeletionErrors
		}
	}
-
-	return err
-}
-
-func (r *layerStore) LoadLocked() error {
-	r.lockfile.Lock()
-	defer r.lockfile.Unlock()
-	return r.Load()
+	return false, nil
 }
 
+// The caller must hold r.mountsLockfile for reading or writing.
+// The caller must hold r.inProcessLock for WRITING.
 func (r *layerStore) loadMounts() error {
 	mounts := make(map[string]*Layer)
 	mpath := r.mountspath()
-	data, err := ioutil.ReadFile(mpath)
+	data, err := os.ReadFile(mpath)
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -469,42 +895,77 @@ func (r *layerStore) loadMounts() error {
 	return err
 }
 
-func (r *layerStore) Save() error {
+// save saves the contents of the store to disk.
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for WRITING.
+func (r *layerStore) save(saveLocations layerLocations) error {
 	r.mountsLockfile.Lock()
 	defer r.mountsLockfile.Unlock()
-	defer r.mountsLockfile.Touch()
-	if err := r.saveLayers(); err != nil {
+	if err := r.saveLayers(saveLocations); err != nil {
 		return err
 	}
 	return r.saveMounts()
 }
 
-func (r *layerStore) saveLayers() error {
-	if !r.IsReadWrite() {
-		return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
-	}
-	if !r.Locked() {
-		return errors.New("layer store is not locked for writing")
+// saveFor saves the contents of the store relevant for modifiedLayer to disk.
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for WRITING.
+func (r *layerStore) saveFor(modifiedLayer *Layer) error {
+	return r.save(layerLocation(modifiedLayer))
+}
+
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for WRITING.
+func (r *layerStore) saveLayers(saveLocations layerLocations) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly) } - rpath := r.layerspath() - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { - return err + r.lockfile.AssertLockedForWriting() + + for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ { + location := layerLocationFromIndex(locationIndex) + if location&saveLocations == 0 { + continue + } + rpath := r.jsonPath[locationIndex] + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + subsetLayers := make([]*Layer, 0, len(r.layers)) + for _, layer := range r.layers { + if layerLocation(layer) == location { + subsetLayers = append(subsetLayers, layer) + } + } + + jldata, err := json.Marshal(&subsetLayers) + if err != nil { + return err + } + opts := ioutils.AtomicFileWriterOptions{} + if location == volatileLayerLocation { + opts.NoSync = true + } + if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, &opts); err != nil { + return err + } + r.layerspathsModified[locationIndex] = opts.ModTime } - jldata, err := json.Marshal(&r.layers) + lw, err := r.lockfile.RecordWrite() if err != nil { return err } - defer r.Touch() - return ioutils.AtomicWriteFile(rpath, jldata, 0600) + r.lastWrite = lw + return nil } +// The caller must hold r.mountsLockfile for writing. +// The caller must hold r.inProcessLock for WRITING. func (r *layerStore) saveMounts() error { - if !r.IsReadWrite() { - return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly) - } - if !r.mountsLockfile.Locked() { - return errors.New("layer store mount information is not locked for writing") + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly) } + r.mountsLockfile.AssertLockedForWriting() mpath := r.mountspath() if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { return err @@ -526,63 +987,107 @@ func (r *layerStore) saveMounts() error { if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil { return err } + lw, err := r.mountsLockfile.RecordWrite() + if err != nil { + return err + } + r.mountsLastWrite = lw return r.loadMounts() } -func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) { +func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) { if err := os.MkdirAll(rundir, 0700); err != nil { return nil, err } if err := os.MkdirAll(layerdir, 0700); err != nil { return nil, err } - lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock")) + // Note: While the containers.lock file is in rundir for transient stores + // we don't want to do this here, because the non-transient layers in + // layers.json might be used externally as a read-only layer (using e.g. 
+ // additionalimagestores), and that would look for the lockfile in the + // same directory + lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock")) if err != nil { return nil, err } - mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock")) + mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock")) if err != nil { return nil, err } + volatileDir := layerdir + if transient { + volatileDir = rundir + } rlstore := layerStore{ - lockfile: lockfile, + lockfile: lockFile, mountsLockfile: mountsLockfile, - driver: driver, rundir: rundir, - layerdir: layerdir, - byid: make(map[string]*Layer), - bymount: make(map[string]*Layer), - byname: make(map[string]*Layer), - uidMap: copyIDMap(s.uidMap), - gidMap: copyIDMap(s.gidMap), - } - if err := rlstore.Load(); err != nil { + jsonPath: [numLayerLocationIndex]string{ + filepath.Join(layerdir, "layers.json"), + filepath.Join(volatileDir, "volatile-layers.json"), + }, + layerdir: layerdir, + + byid: make(map[string]*Layer), + byname: make(map[string]*Layer), + bymount: make(map[string]*Layer), + + driver: driver, + } + if err := rlstore.startWritingWithReload(false); err != nil { + return nil, err + } + defer rlstore.stopWriting() + lw, err := rlstore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + rlstore.lastWrite = lw + // rlstore.mountsLastWrite is initialized inside rlstore.load(). + if _, err := rlstore.load(true); err != nil { return nil, err } return &rlstore, nil } -func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) { - lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock")) +func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roLayerStore, error) { + lockfile, err := lockfile.GetROLockFile(filepath.Join(layerdir, "layers.lock")) if err != nil { return nil, err } rlstore := layerStore{ lockfile: lockfile, mountsLockfile: nil, - driver: driver, rundir: rundir, - layerdir: layerdir, - byid: make(map[string]*Layer), - bymount: make(map[string]*Layer), - byname: make(map[string]*Layer), + jsonPath: [numLayerLocationIndex]string{ + filepath.Join(layerdir, "layers.json"), + filepath.Join(layerdir, "volatile-layers.json"), + }, + layerdir: layerdir, + + byid: make(map[string]*Layer), + byname: make(map[string]*Layer), + bymount: make(map[string]*Layer), + + driver: driver, + } + if err := rlstore.startReadingWithReload(false); err != nil { + return nil, err + } + defer rlstore.stopReading() + lw, err := rlstore.lockfile.GetLastWrite() + if err != nil { + return nil, err } - if err := rlstore.Load(); err != nil { + rlstore.lastWrite = lw + if _, err := rlstore.load(false); err != nil { return nil, err } return &rlstore, nil } +// Requires startReading or startWriting. func (r *layerStore) lookup(id string) (*Layer, bool) { if layer, ok := r.byid[id]; ok { return layer, ok @@ -595,6 +1100,7 @@ func (r *layerStore) lookup(id string) (*Layer, bool) { return nil, false } +// Requires startReading or startWriting. func (r *layerStore) Size(name string) (int64, error) { layer, ok := r.lookup(name) if !ok { @@ -609,21 +1115,23 @@ func (r *layerStore) Size(name string) (int64, error) { return -1, nil } +// Requires startWriting. 
func (r *layerStore) ClearFlag(id string, flag string) error { - if !r.IsReadWrite() { - return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { return ErrLayerUnknown } delete(layer.Flags, flag) - return r.Save() + return r.saveFor(layer) } +// Requires startWriting. func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { - if !r.IsReadWrite() { - return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { @@ -633,13 +1141,14 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { layer.Flags = make(map[string]interface{}) } layer.Flags[flag] = value - return r.Save() + return r.saveFor(layer) } func (r *layerStore) Status() ([][2]string, error) { return r.driver.Status(), nil } +// Requires startWriting. func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) { if duplicateLayer, idInUse := r.byid[id]; idInUse { return duplicateLayer, ErrDuplicateID @@ -674,7 +1183,9 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s // TODO: check if necessary fields are filled r.layers = append(r.layers, layer) - r.idindex.Add(id) + // This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // Implementing recovery from an unlikely and unimportant failure here would be too risky. + _ = r.idindex.Add(id) r.byid[id] = layer for _, name := range names { // names got from the additional layer store won't be used r.byname[name] = layer @@ -685,16 +1196,19 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s if layer.UncompressedDigest != "" { r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) } - if err := r.Save(); err != nil { - r.driver.Remove(id) + if err := r.saveFor(layer); err != nil { + if err2 := r.driver.Remove(id); err2 != nil { + logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, err2) + } return nil, err } return copyLayer(layer), nil } +// Requires startWriting. 
 func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
-	if !r.IsReadWrite() {
-		return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
+	if !r.lockfile.IsReadWrite() {
+		return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
 	}
 	if err := os.MkdirAll(r.rundir, 0700); err != nil {
 		return nil, -1, err
@@ -746,7 +1260,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 		templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
 		templateCompressionType = templateLayer.CompressionType
 		templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
-		templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID))
+		templateTSdata, tserr = os.ReadFile(r.tspath(templateLayer.ID))
 		if tserr != nil && !os.IsNotExist(tserr) {
 			return nil, -1, tserr
 		}
@@ -759,7 +1273,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 		parentMappings = &idtools.IDMappings{}
 	}
 	if mountLabel != "" {
-		label.ReserveLabel(mountLabel)
+		selinux.ReserveLabel(mountLabel)
 	}
 
 	// Before actually creating the layer, make a persistent record of it with incompleteFlag,
@@ -782,9 +1296,12 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 		UIDMap: copyIDMap(moreOptions.UIDMap),
 		GIDMap: copyIDMap(moreOptions.GIDMap),
 		BigDataNames: []string{},
+		volatileStore: moreOptions.Volatile,
 	}
 	r.layers = append(r.layers, layer)
-	r.idindex.Add(id)
+	// This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway.
+	// Implementing recovery from an unlikely and unimportant failure here would be too risky.
+	_ = r.idindex.Add(id)
 	r.byid[id] = layer
 	for _, name := range names {
 		r.byname[name] = layer
@@ -809,7 +1326,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 		}
 	}()
 
-	err := r.Save()
+	err := r.saveFor(layer)
 	if err != nil {
 		cleanupFailureContext = "saving incomplete layer metadata"
 		return nil, -1, err
@@ -875,7 +1392,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 		}
 	}
 	delete(layer.Flags, incompleteFlag)
-	err = r.Save()
+	err = r.saveFor(layer)
 	if err != nil {
 		cleanupFailureContext = "saving finished layer metadata"
 		return nil, -1, err
@@ -886,34 +1403,41 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 	return layer, size, err
 }
 
+// Requires startWriting.
 func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) {
 	layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil)
 	return layer, err
 }
 
+// Requires startWriting.
 func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) {
 	return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil)
 }
 
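The ordering Put() relies on is easy to miss in diff form: persist a record marked with incompleteFlag first, do the fallible driver work, and clear the flag only after every step succeeded, so that the load() cleanup pass shown earlier can delete anything left behind by a crash. A standalone sketch of that idiom, using hypothetical record/save helpers rather than real c/storage types:

```go
package main

import "fmt"

// record is a hypothetical stand-in for a layer's persisted metadata.
type record struct {
	ID    string
	Flags map[string]interface{}
}

const incompleteFlag = "incomplete"

// save stands in for saveFor(): pretend to atomically persist the record.
func save(r *record) error {
	fmt.Printf("saved %s flags=%v\n", r.ID, r.Flags)
	return nil
}

func createLayer(id string, doWork func() error) (*record, error) {
	r := &record{ID: id, Flags: map[string]interface{}{incompleteFlag: true}}
	// 1. Persist the incomplete record first: if we crash during doWork, a later
	//    load() sees the flag and cleans the layer up instead of using it.
	if err := save(r); err != nil {
		return nil, err
	}
	// 2. The expensive, interruptible part (driver.Create, ApplyDiff, ...).
	if err := doWork(); err != nil {
		return nil, err // the persisted record still says "incomplete"
	}
	// 3. Only now does the layer become visible as usable.
	delete(r.Flags, incompleteFlag)
	return r, save(r)
}

func main() {
	if _, err := createLayer("layer1", func() error { return nil }); err != nil {
		fmt.Println("error:", err)
	}
}
```

+// Requires startReading or startWriting.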
func (r *layerStore) Mounted(id string) (int, error) { - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return 0, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } - r.mountsLockfile.RLock() - defer r.mountsLockfile.Unlock() - if modified, err := r.mountsLockfile.Modified(); modified || err != nil { - if err = r.loadMounts(); err != nil { - return 0, err - } - } layer, ok := r.lookup(id) if !ok { return 0, ErrLayerUnknown } + // NOTE: The caller of this function is not holding (currently cannot hold) r.mountsLockfile, + // so the data is necessarily obsolete by the time this function returns. So, we don’t even + // try to reload it in this function, we just rely on r.load() that happened during + // r.startReading() or r.startWriting(). return layer.MountCount, nil } +// Requires startWriting. func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) { + // LOCKING BUG: This is reachable via store.Diff → layerStore.Diff → layerStore.newFileGetter + // (with btrfs and zfs graph drivers) holding layerStore only locked for reading, while it modifies + // - r.layers[].MountCount (directly and via loadMounts / saveMounts) + // - r.layers[].MountPoint (directly and via loadMounts / saveMounts) + // - r.bymount (via loadMounts / saveMounts) + // check whether options include ro option hasReadOnlyOpt := func(opts []string) bool { for _, item := range opts { @@ -926,17 +1450,14 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) // You are not allowed to mount layers from readonly stores if they // are not mounted read/only. - if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) { + if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) { return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.Lock() defer r.mountsLockfile.Unlock() - if modified, err := r.mountsLockfile.Modified(); modified || err != nil { - if err = r.loadMounts(); err != nil { - return "", err - } + if err := r.reloadMountsIfChanged(); err != nil { + return "", err } - defer r.mountsLockfile.Touch() layer, ok := r.lookup(id) if !ok { return "", ErrLayerUnknown @@ -976,18 +1497,22 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) return mountpoint, err } -func (r *layerStore) Unmount(id string, force bool) (bool, error) { - if !r.IsReadWrite() { +// Requires startWriting. 
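The hasReadOnlyOpt check in Mount above still allows layers from a read-only (additional) store to be mounted, as long as the caller explicitly requests a read-only mount. An illustrative call under the usual write bracket, assuming "ro" is the option string the check scans for and that drivers.MountOpts carries the option list in its Options field:

    // Hypothetical caller: mounting a layer from a read-only store.
    func mountReadOnlyExample(r *layerStore, id string) (string, error) {
        return r.Mount(id, drivers.MountOpts{
            Options: []string{"ro"}, // mandatory when the store is not read/write
        })
    }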
+func (r *layerStore) unmount(id string, force bool, conditional bool) (bool, error) { + // LOCKING BUG: This is reachable via store.Diff → layerStore.Diff → layerStore.newFileGetter → simpleGetCloser.Close() + // (with btrfs and zfs graph drivers) holding layerStore only locked for reading, while it modifies + // - r.layers[].MountCount (directly and via loadMounts / saveMounts) + // - r.layers[].MountPoint (directly and via loadMounts / saveMounts) + // - r.bymount (via loadMounts / saveMounts) + + if !r.lockfile.IsReadWrite() { return false, fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.Lock() defer r.mountsLockfile.Unlock() - if modified, err := r.mountsLockfile.Modified(); modified || err != nil { - if err = r.loadMounts(); err != nil { - return false, err - } + if err := r.reloadMountsIfChanged(); err != nil { + return false, err } - defer r.mountsLockfile.Touch() layer, ok := r.lookup(id) if !ok { layerByMount, ok := r.bymount[filepath.Clean(id)] @@ -996,6 +1521,9 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) { } layer = layerByMount } + if conditional && layer.MountCount == 0 { + return false, ErrLayerNotMounted + } if force { layer.MountCount = 1 } @@ -1015,17 +1543,16 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) { return true, err } +// Requires startReading or startWriting. func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { - if !r.IsReadWrite() { + if !r.lockfile.IsReadWrite() { return nil, nil, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.RLock() defer r.mountsLockfile.Unlock() - if modified, err := r.mountsLockfile.Modified(); modified || err != nil { - if err = r.loadMounts(); err != nil { - return nil, nil, err - } - } + // We are not checking r.mountsLockfile.Modified() and calling r.loadMounts here because the store + // is only locked for reading = we are not allowed to modify layer data. + // Holding r.mountsLockfile protects us against concurrent mount/unmount operations. layer, ok := r.lookup(id) if !ok { return nil, nil, ErrLayerUnknown @@ -1038,6 +1565,13 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { // We don't know which directories to examine. return nil, nil, ErrLayerNotMounted } + // Holding r.mountsLockfile protects us against concurrent mount/unmount operations, but we didn’t + // hold it continuously since the time we loaded the mount data; so it’s possible the layer + // was unmounted in the meantime, or mounted elsewhere. Treat that as if we were run after the unmount, + // = a missing mount, not a filesystem error. + if _, err := system.Lstat(layer.MountPoint); errors.Is(err, os.ErrNotExist) { + return nil, nil, ErrLayerNotMounted + } rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap) if err != nil { return nil, nil, fmt.Errorf("reading root ID values for layer %q: %w", layer.ID, err) @@ -1086,26 +1620,15 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { return uids, gids, nil } +// Requires startWriting. func (r *layerStore) removeName(layer *Layer, name string) { layer.Names = stringSliceWithoutValue(layer.Names, name) } -// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
-func (r *layerStore) SetNames(id string, names []string) error { - return r.updateNames(id, names, setNames) -} - -func (r *layerStore) AddNames(id string, names []string) error { - return r.updateNames(id, names, addNames) -} - -func (r *layerStore) RemoveNames(id string, names []string) error { - return r.updateNames(id, names, removeNames) -} - +// Requires startWriting. func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error { - if !r.IsReadWrite() { - return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerspath(), ErrStoreIsReadOnly) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { @@ -1126,7 +1649,7 @@ func (r *layerStore) updateNames(id string, names []string, op updateNameOperati r.byname[name] = layer } layer.Names = names - return r.Save() + return r.saveFor(layer) } func (r *layerStore) datadir(id string) string { @@ -1137,6 +1660,7 @@ func (r *layerStore) datapath(id, key string) string { return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) } +// Requires startReading or startWriting. func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) { if key == "" { return nil, fmt.Errorf("can't retrieve layer big data value for empty name: %w", ErrInvalidBigDataName) @@ -1148,12 +1672,13 @@ func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) { return os.Open(r.datapath(layer.ID, key)) } +// Requires startWriting. func (r *layerStore) SetBigData(id, key string, data io.Reader) error { if key == "" { return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName) } - if !r.IsReadWrite() { - return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { @@ -1190,11 +1715,12 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error { } if addName { layer.BigDataNames = append(layer.BigDataNames, key) - return r.Save() + return r.saveFor(layer) } return nil } +// Requires startReading or startWriting. func (r *layerStore) BigDataNames(id string) ([]string, error) { layer, ok := r.lookup(id) if !ok { @@ -1203,6 +1729,7 @@ func (r *layerStore) BigDataNames(id string) ([]string, error) { return copyStringSlice(layer.BigDataNames), nil } +// Requires startReading or startWriting. func (r *layerStore) Metadata(id string) (string, error) { if layer, ok := r.lookup(id); ok { return layer.Metadata, nil @@ -1210,13 +1737,14 @@ func (r *layerStore) Metadata(id string) (string, error) { return "", ErrLayerUnknown } +// Requires startWriting. 
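To make the big-data API above concrete, here is a sketch of a write-then-read round trip; the key name is invented, and the io and strings imports plus the locking helpers are assumptions:

    // Hypothetical helper: store a value under a key and read it back.
    func bigDataRoundTrip(r *layerStore, id string) ([]byte, error) {
        if err := r.startWriting(); err != nil {
            return nil, err
        }
        defer r.stopWriting()
        if err := r.SetBigData(id, "example-key", strings.NewReader("payload")); err != nil {
            return nil, err
        }
        rc, err := r.BigData(id, "example-key")
        if err != nil {
            return nil, err
        }
        defer rc.Close()
        return io.ReadAll(rc)
    }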
func (r *layerStore) SetMetadata(id, metadata string) error { - if !r.IsReadWrite() { - return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerspath(), ErrStoreIsReadOnly) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerdir, ErrStoreIsReadOnly) } if layer, ok := r.lookup(id); ok { layer.Metadata = metadata - return r.Save() + return r.saveFor(layer) } return ErrLayerUnknown } @@ -1226,6 +1754,7 @@ func (r *layerStore) tspath(id string) string { } // layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true +// The caller must hold r.inProcessLock for reading. func layerHasIncompleteFlag(layer *Layer) bool { // layer.Flags[…] is defined to succeed and return ok == false if Flags == nil if flagValue, ok := layer.Flags[incompleteFlag]; ok { @@ -1236,9 +1765,10 @@ func layerHasIncompleteFlag(layer *Layer) bool { return false } +// Requires startWriting. func (r *layerStore) deleteInternal(id string) error { - if !r.IsReadWrite() { - return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { @@ -1250,15 +1780,14 @@ func (r *layerStore) deleteInternal(id string) error { layer.Flags = make(map[string]interface{}) } layer.Flags[incompleteFlag] = true - if err := r.Save(); err != nil { + if err := r.saveFor(layer); err != nil { return err } } // We never unset incompleteFlag; below, we remove the entire object from r.layers. id = layer.ID - err := r.driver.Remove(id) - if err != nil { + if err := r.driver.Remove(id); err != nil { return err } @@ -1268,7 +1797,9 @@ func (r *layerStore) deleteInternal(id string) error { for _, name := range layer.Names { delete(r.byname, name) } - r.idindex.Delete(id) + // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. + _ = r.idindex.Delete(id) mountLabel := layer.MountLabel if layer.MountPoint != "" { delete(r.bymount, layer.MountPoint) @@ -1298,13 +1829,14 @@ func (r *layerStore) deleteInternal(id string) error { } } if !found { - label.ReleaseLabel(mountLabel) + selinux.ReleaseLabel(mountLabel) } } return nil } +// Requires startWriting. func (r *layerStore) deleteInDigestMap(id string) { for digest, layers := range r.bycompressedsum { for i, layerID := range layers { @@ -1326,6 +1858,7 @@ func (r *layerStore) deleteInDigestMap(id string) { } } +// Requires startWriting. func (r *layerStore) Delete(id string) error { layer, ok := r.lookup(id) if !ok { @@ -1335,37 +1868,28 @@ func (r *layerStore) Delete(id string) error { // The layer may already have been explicitly unmounted, but if not, we // should try to clean that up before we start deleting anything at the // driver level. 
- mountCount, err := r.Mounted(id) - if err != nil { - return fmt.Errorf("checking if layer %q is still mounted: %w", id, err) - } - for mountCount > 0 { - if _, err := r.Unmount(id, false); err != nil { - return err + for { + _, err := r.unmount(id, false, true) + if err == ErrLayerNotMounted { + break } - mountCount, err = r.Mounted(id) if err != nil { - return fmt.Errorf("checking if layer %q is still mounted: %w", id, err) + return err } } if err := r.deleteInternal(id); err != nil { return err } - return r.Save() -} - -func (r *layerStore) Lookup(name string) (id string, err error) { - if layer, ok := r.lookup(name); ok { - return layer.ID, nil - } - return "", ErrLayerUnknown + return r.saveFor(layer) } +// Requires startReading or startWriting. func (r *layerStore) Exists(id string) bool { _, ok := r.lookup(id) return ok } +// Requires startReading or startWriting. func (r *layerStore) Get(id string) (*Layer, error) { if layer, ok := r.lookup(id); ok { return copyLayer(layer), nil @@ -1373,14 +1897,18 @@ func (r *layerStore) Get(id string) (*Layer, error) { return nil, ErrLayerUnknown } +// Requires startWriting. func (r *layerStore) Wipe() error { - if !r.IsReadWrite() { - return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } ids := make([]string, 0, len(r.byid)) for id := range r.byid { ids = append(ids, id) } + sort.Slice(ids, func(i, j int) bool { + return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created) + }) for _, id := range ids { if err := r.Delete(id); err != nil { return err @@ -1389,6 +1917,7 @@ func (r *layerStore) Wipe() error { return nil } +// Requires startReading or startWriting. func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer, toLayer *Layer, err error) { var ok bool toLayer, ok = r.lookup(to) @@ -1413,6 +1942,7 @@ func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID st return from, to, fromLayer, toLayer, nil } +// The caller must hold r.inProcessLock for reading. func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings { if layer == nil { return &idtools.IDMappings{} @@ -1420,6 +1950,7 @@ func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings { return idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) } +// Requires startReading or startWriting. func (r *layerStore) Changes(from, to string) ([]archive.Change, error) { from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to) if err != nil { @@ -1438,11 +1969,13 @@ func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) { return os.Open(filepath.Join(s.path, path)) } +// LOCKING BUG: See the comments in layerStore.Diff func (s *simpleGetCloser) Close() error { - _, err := s.r.Unmount(s.id, false) + _, err := s.r.unmount(s.id, false, false) return err } +// LOCKING BUG: See the comments in layerStore.Diff func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { if getter, ok := r.driver.(drivers.DiffGetterDriver); ok { return getter.DiffGetter(id) @@ -1458,6 +1991,25 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { }, nil } +// writeCompressedData copies data from source to compressor, which is on top of pwriter. 
+func writeCompressedData(compressor io.WriteCloser, source io.ReadCloser) error { + defer compressor.Close() + defer source.Close() + _, err := io.Copy(compressor, source) + return err +} + +// writeCompressedDataGoroutine copies data from source to compressor, which is on top of pwriter. +// All errors must be reported by updating pwriter. +func writeCompressedDataGoroutine(pwriter *io.PipeWriter, compressor io.WriteCloser, source io.ReadCloser) { + err := errors.New("internal error: unexpected panic in writeCompressedDataGoroutine") + defer func() { // Note that this is not the same as {defer pwriter.CloseWithError(err)}; we need err to be evaluated lazily. + _ = pwriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil + }() + err = writeCompressedData(compressor, source) +} + +// Requires startReading or startWriting. func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { var metadata storage.Unpacker @@ -1489,12 +2041,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, preader.Close() return nil, err } - go func() { - defer pwriter.Close() - defer compressor.Close() - defer rc.Close() - io.Copy(compressor, rc) - }() + go writeCompressedDataGoroutine(pwriter, compressor, rc) return preader, nil } @@ -1574,6 +2121,8 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, metadata = storage.NewJSONUnpacker(decompressor) + // LOCKING BUG: With btrfs and zfs graph drivers, this uses r.Mount() and r.unmount() holding layerStore only locked for reading + // but they modify in-memory state. fgetter, err := r.newFileGetter(to) if err != nil { errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err)) @@ -1609,6 +2158,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, return maybeCompressReadCloser(rc) } +// Requires startReading or startWriting. func (r *layerStore) DiffSize(from, to string) (size int64, err error) { var fromLayer, toLayer *Layer from, to, fromLayer, toLayer, err = r.findParentAndLayer(from, to) @@ -1618,13 +2168,15 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) { return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) } +// Requires startWriting. func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { return r.applyDiffWithOptions(to, nil, diff) } +// Requires startWriting.
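The closure in writeCompressedDataGoroutine above is deliberate: a deferred call evaluates its arguments at the point of the defer statement, so a plain defer pwriter.CloseWithError(err) would always close the pipe with the placeholder error. A self-contained demonstration of the difference:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        err := errors.New("placeholder")
        defer fmt.Println("eager:", err)             // argument captured here
        defer func() { fmt.Println("lazy:", err) }() // err read when it runs
        err = errors.New("real error")
    }
    // Output (defers run last-in, first-out):
    // lazy: real error
    // eager: placeholder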
func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) { - if !r.IsReadWrite() { - return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerspath(), ErrStoreIsReadOnly) + if !r.lockfile.IsReadWrite() { + return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(to) @@ -1660,7 +2212,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, if compressedDigester != nil { compressedWriter = compressedDigester.Hash() } else { - compressedWriter = ioutil.Discard + compressedWriter = io.Discard } compressedCounter := ioutils.NewWriteCounter(compressedWriter) defragmented = io.TeeReader(defragmented, compressedCounter) @@ -1710,13 +2262,11 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, return -1, err } compressor.Close() - if err == nil { - if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { - return -1, err - } - if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { - return -1, err - } + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { + return -1, err + } + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { + return -1, err } if compressedDigester != nil { compressedDigest = compressedDigester.Digest() @@ -1765,11 +2315,12 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, return layer.GIDs[i] < layer.GIDs[j] }) - err = r.Save() + err = r.saveFor(layer) return size, err } +// Requires (startReading or?) startWriting. func (r *layerStore) DifferTarget(id string) (string, error) { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { @@ -1782,6 +2333,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) { return ddriver.DifferTarget(layer.ID) } +// Requires startWriting. func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { @@ -1806,18 +2358,21 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, layer.UncompressedDigest = diffOutput.UncompressedDigest layer.UncompressedSize = diffOutput.Size layer.Metadata = diffOutput.Metadata - if err = r.Save(); err != nil { + if err = r.saveFor(layer); err != nil { return err } for k, v := range diffOutput.BigData { if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil { - r.Delete(id) + if err2 := r.Delete(id); err2 != nil { + logrus.Errorf("While recovering from a failure to set big data, error deleting layer %#v: %v", id, err2) + } return err } } return err } +// Requires startWriting. func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { @@ -1845,7 +2400,7 @@ func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOp } layer.UIDs = output.UIDs layer.GIDs = output.GIDs - err = r.Save() + err = r.saveFor(layer) return &output, err } @@ -1857,6 +2412,7 @@ func (r *layerStore) CleanupStagingDirectory(stagingDirectory string) error { return ddriver.CleanupStagingDirectory(stagingDirectory) } +// Requires startReading or startWriting. 
func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) { var layers []Layer for _, layerID := range m[d] { @@ -1869,88 +2425,18 @@ func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Di return layers, nil } +// Requires startReading or startWriting. func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { return r.layersByDigestMap(r.bycompressedsum, d) } +// Requires startReading or startWriting. func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { return r.layersByDigestMap(r.byuncompressedsum, d) } -func (r *layerStore) Lock() { - r.lockfile.Lock() -} - -func (r *layerStore) RecursiveLock() { - r.lockfile.RecursiveLock() -} - -func (r *layerStore) RLock() { - r.lockfile.RLock() -} - -func (r *layerStore) Unlock() { - r.lockfile.Unlock() -} - -func (r *layerStore) Touch() error { - return r.lockfile.Touch() -} - -func (r *layerStore) Modified() (bool, error) { - var mmodified, tmodified bool - lmodified, err := r.lockfile.Modified() - if err != nil { - return lmodified, err - } - if r.IsReadWrite() { - r.mountsLockfile.RLock() - defer r.mountsLockfile.Unlock() - mmodified, err = r.mountsLockfile.Modified() - if err != nil { - return lmodified, err - } - } - - if lmodified || mmodified { - return true, nil - } - - // If the layers.json file has been modified manually, then we have to - // reload the storage in any case. - info, err := os.Stat(r.layerspath()) - if err != nil && !os.IsNotExist(err) { - return false, fmt.Errorf("stat layers file: %w", err) - } - if info != nil { - tmodified = info.ModTime() != r.layerspathModified - r.layerspathModified = info.ModTime() - } - - return tmodified, nil -} - -func (r *layerStore) IsReadWrite() bool { - return r.lockfile.IsReadWrite() -} - -func (r *layerStore) TouchedSince(when time.Time) bool { - return r.lockfile.TouchedSince(when) -} - -func (r *layerStore) Locked() bool { - return r.lockfile.Locked() -} - -func (r *layerStore) ReloadIfChanged() error { - r.loadMut.Lock() - defer r.loadMut.Unlock() - - modified, err := r.Modified() - if err == nil && modified { - return r.Load() - } - return err +func (r *layerStore) supportsShifting() bool { + return r.driver.SupportsShifting() } func closeAll(closes ...func() error) (rErr error) { diff --git a/vendor/github.com/containers/storage/lockfile_compat.go b/vendor/github.com/containers/storage/lockfile_compat.go index 6fac2ebac63..640203881ac 100644 --- a/vendor/github.com/containers/storage/lockfile_compat.go +++ b/vendor/github.com/containers/storage/lockfile_compat.go @@ -4,12 +4,15 @@ import ( "github.com/containers/storage/pkg/lockfile" ) -type Locker = lockfile.Locker +// Deprecated: Use lockfile.*LockFile. +type Locker = lockfile.Locker //lint:ignore SA1019 // lockfile.Locker is deprecated +// Deprecated: Use lockfile.GetLockFile. func GetLockfile(path string) (lockfile.Locker, error) { return lockfile.GetLockfile(path) } +// Deprecated: Use lockfile.GetROLockFile. 
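For code migrating off these compatibility shims, the replacement API lives in pkg/lockfile and returns the concrete *LockFile type. A sketch following the deprecation notices (the method names are assumed to match the old Locker interface):

    import "github.com/containers/storage/pkg/lockfile"

    func openLockExample(path string) error {
        lf, err := lockfile.GetLockFile(path) // replaces storage.GetLockfile
        if err != nil {
            return err
        }
        lf.Lock()
        defer lf.Unlock()
        // ... operate on the locked resource ...
        return nil
    }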
func GetROLockfile(path string) (lockfile.Locker, error) { return lockfile.GetROLockfile(path) } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 0d0ad7baec3..6209205b3ad 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -9,7 +9,6 @@ import ( "fmt" "io" "io/fs" - "io/ioutil" "os" "path/filepath" "runtime" @@ -25,7 +24,6 @@ import ( "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" gzip "github.com/klauspost/pgzip" - "github.com/opencontainers/runc/libcontainer/userns" "github.com/sirupsen/logrus" "github.com/ulikunitz/xz" ) @@ -77,6 +75,7 @@ const ( solaris = "solaris" windows = "windows" darwin = "darwin" + freebsd = "freebsd" ) var xattrsToIgnore = map[string]interface{}{ @@ -484,7 +483,7 @@ func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts * } // canonicalTarName provides a platform-independent and consistent posix-style -//path for files and directories to be archived regardless of the platform. +// path for files and directories to be archived regardless of the platform. func canonicalTarName(name string, isDir bool) (string, error) { name, err := CanonicalTarNameForPath(name) if err != nil { @@ -528,6 +527,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { if err := ReadUserXattrToTarHeader(path, hdr); err != nil { return err } + if err := ReadFileFlagsToTarHeader(path, hdr); err != nil { + return err + } if ta.CopyPass { copyPassHeader(hdr) } @@ -673,7 +675,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } - if err := os.Link(targetPath, path); err != nil { + if err := handleLLink(targetPath, path); err != nil { return err } @@ -771,6 +773,15 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } + // We defer setting flags on directories until the end of + // Unpack or UnpackLayer in case setting them makes the + // directory immutable. + if hdr.Typeflag != tar.TypeDir { + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return err + } + } + if len(errs) > 0 { logrus.WithFields(logrus.Fields{ "errors": errs, @@ -865,7 +876,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) rebaseName := options.RebaseNames[include] walkRoot := getWalkRoot(srcPath, include) - filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { + if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil @@ -875,7 +886,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. - return nil + return nil //nolint: nilerr } if options.IncludeSourceDir && include == "." && relFilePath != "." 
{ @@ -892,8 +903,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) if include != relFilePath { matches, err := pm.IsMatch(relFilePath) if err != nil { - logrus.Errorf("Matching %s: %v", relFilePath, err) - return err + return fmt.Errorf("matching %s: %w", relFilePath, err) } skip = matches } @@ -956,7 +966,10 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } } return nil - }) + }); err != nil { + logrus.Errorf("%s", err) + return + } } }() @@ -1100,6 +1113,9 @@ loop: if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return err + } } return nil } @@ -1107,7 +1123,9 @@ loop: // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. +// +// identity (uncompressed), gzip, bzip2, xz. +// // FIXME: specify behavior when target path exists vs. doesn't exist. func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) @@ -1159,7 +1177,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error { GIDMaps: tarMappings.GIDs(), Compression: Uncompressed, CopyPass: true, - InUserNS: userns.RunningInUserNS(), + InUserNS: unshare.IsRootless(), } archive, err := TarWithOptions(src, options) if err != nil { @@ -1174,7 +1192,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error { UIDMaps: untarMappings.UIDs(), GIDMaps: untarMappings.GIDs(), ChownOpts: archiver.ChownOpts, - InUserNS: userns.RunningInUserNS(), + InUserNS: unshare.IsRootless(), } return archiver.Untar(archive, dst, options) } @@ -1194,7 +1212,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error { UIDMaps: untarMappings.UIDs(), GIDMaps: untarMappings.GIDs(), ChownOpts: archiver.ChownOpts, - InUserNS: userns.RunningInUserNS(), + InUserNS: unshare.IsRootless(), } return archiver.Untar(archive, dst, options) } @@ -1294,7 +1312,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { UIDMaps: archiver.UntarIDMappings.UIDs(), GIDMaps: archiver.UntarIDMappings.GIDs(), ChownOpts: archiver.ChownOpts, - InUserNS: userns.RunningInUserNS(), + InUserNS: unshare.IsRootless(), NoOverwriteDirNonDir: true, } err = archiver.Untar(r, filepath.Dir(dst), options) @@ -1348,7 +1366,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. 
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") + f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } @@ -1472,7 +1490,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) } hashWorker.Wait() - if err == nil { + if err == nil && hashError != nil { err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError) } return err diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go index 7bc44a5665e..eab9da51ab3 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_110.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -1,3 +1,4 @@ +//go:build go1.10 // +build go1.10 package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go index d19811fdbca..f591bf389b0 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_19.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -1,3 +1,4 @@ +//go:build !go1.10 // +build !go1.10 package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go new file mode 100644 index 00000000000..4d362f07503 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go @@ -0,0 +1,19 @@ +//go:build freebsd || darwin +// +build freebsd darwin + +package archive + +import ( + "archive/tar" + "os" + + "golang.org/x/sys/unix" +) + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + return unix.Fchmodat(unix.AT_FDCWD, path, uint32(permissionsMask), unix.AT_SYMLINK_NOFOLLOW) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go deleted file mode 100644 index 7c307ffcfe5..00000000000 --- a/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go +++ /dev/null @@ -1,125 +0,0 @@ -// +build freebsd - -package archive - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/system" - "github.com/opencontainers/runc/libcontainer/userns" - "golang.org/x/sys/unix" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. 
-func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - // Currently go does not fill in the major/minors - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert - } - } - - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - inode = s.Ino - } - - return -} - -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") - } - return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - if userns.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= unix.S_IFBLK - case tar.TypeChar: - mode |= unix.S_IFCHR - case tar.TypeFifo: - mode |= unix.S_IFIFO - } - - return system.Mknod(path, mode, uint64(system.Mkdev(hdr.Devmajor, hdr.Devminor))) -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { - permissionsMask := hdrInfo.Mode() - if forceMask != nil { - permissionsMask = *forceMask - } - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, permissionsMask); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, permissionsMask); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 51fbd9a2197..36e5d4bc2fa 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -189,3 +189,22 @@ func GetFileOwner(path string) (uint32, uint32, uint32, error) { } return 0, 0, uint32(f.Mode()), nil } + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + return nil +} diff --git 
a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go index 4b8834444f0..2468ab3ca4d 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index 7c3e442dad9..f8a34c8314f 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -1,4 +1,5 @@ -// +build !windows,!freebsd +//go:build !windows +// +build !windows package archive @@ -49,8 +50,8 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( // Currently go does not fill in the major/minors if s.Mode&unix.S_IFBLK != 0 || s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert + hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert } } @@ -97,24 +98,15 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { mode |= unix.S_IFIFO } - return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) + return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor)) } -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { - permissionsMask := hdrInfo.Mode() - if forceMask != nil { - permissionsMask = *forceMask - } - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, permissionsMask); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, permissionsMask); err != nil { - return err - } - } - return nil +// Hardlink without following symlinks +func handleLLink(targetPath, path string) error { + // Note: on Linux, the link syscall will not follow symlinks. + // This behavior is implementation-dependent since + // POSIX.1-2008, so to make it clear that we need non-symlink + // following here we use the linkat syscall which has a flags + // field to select symlink following or not.
+ return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0) } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go index 8e7a2fd029a..e4401177553 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -78,3 +78,8 @@ func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { // no notion of file ownership mapping yet on Windows return idtools.IDPair{0, 0}, nil } + +// Hardlink without following symlinks +func handleLLink(targetPath string, path string) error { + return os.Link(targetPath, path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index c7bb25d0f17..fc705484eb3 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -5,7 +5,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "path/filepath" "reflect" @@ -57,7 +56,7 @@ func (change *Change) String() string { return fmt.Sprintf("%s %s", change.Kind, change.Path) } -// for sort.Sort +// changesByPath implements sort.Interface. type changesByPath []Change func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } @@ -403,7 +402,7 @@ func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldRoot, newRoot *FileInfo ) if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") + emptyDir, err := os.MkdirTemp("", "empty") if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go index 8769f2291b6..c27930e97d8 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -43,7 +43,12 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { root := newRootFileInfo(idMappings) - err := filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { + sourceStat, err := system.Lstat(sourceDir) + if err != nil { + return nil, err + } + + err = filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { if err != nil { return err } @@ -86,8 +91,12 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf if err != nil { return err } - info.stat = s + if s.Dev() != sourceStat.Dev() { + return filepath.SkipDir + } + + info.stat = s info.capability, _ = system.Lgetxattr(path, "security.capability") parent.children[info.name] = info diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go index 1cc1910f897..6b2e5938078 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package archive @@ -29,6 +30,7 @@ func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.Sta if oldStat.Mode() != newStat.Mode() || ownerChanged || oldStat.Rdev() != newStat.Rdev() || + oldStat.Flags() != newStat.Flags() || // Don't look at size for dirs, its not a good measure of change 
(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go index 6298a674d49..2c714e8da54 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -4,7 +4,6 @@ import ( "archive/tar" "errors" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -255,7 +254,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir // The destination exists as a directory. No alteration // to srcContent is needed as its contents can be // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil + return dstInfo.Path, io.NopCloser(srcContent), nil case dstInfo.Exists && srcInfo.IsDir: // The destination exists as some type of file and the source // content is a directory. This is an error condition since diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go index e305b5e4af9..d6c5fd98b13 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index 59a3207fde0..8fec5af3865 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "io/fs" - "io/ioutil" "os" "path/filepath" "runtime" @@ -102,7 +101,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, basename := filepath.Base(hdr.Name) aufsHardlinks[basename] = hdr if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil { + if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil { return 0, err } defer os.RemoveAll(aufsTempdir) @@ -146,6 +145,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, return nil } if _, exists := unpackedPaths[path]; !exists { + if err := resetImmutable(path, nil); err != nil { + return err + } err := os.RemoveAll(path) return err } @@ -157,6 +159,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } else { originalBase := base[len(WhiteoutPrefix):] originalPath := filepath.Join(dir, originalBase) + if err := resetImmutable(originalPath, nil); err != nil { + return 0, err + } if err := os.RemoveAll(originalPath); err != nil { return 0, err } @@ -166,7 +171,15 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). + // + // We always reset the immutable flag (if present) to allow metadata + // changes and to allow directory modification. The flag will be + // re-applied based on the contents of hdr either at the end for + // directories or in createTarFile otherwise. 
if fi, err := os.Lstat(path); err == nil { + if err := resetImmutable(path, &fi); err != nil { + return 0, err + } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return 0, err @@ -216,6 +229,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return 0, err } + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return 0, err + } } return size, nil @@ -246,7 +262,9 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp if err != nil { return 0, err } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + defer func() { + _, _ = system.Umask(oldmask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above. + }() if decompress { layer, err = DecompressStream(layer) diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go new file mode 100644 index 00000000000..14661c41139 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go @@ -0,0 +1,167 @@ +//go:build freebsd +// +build freebsd + +package archive + +import ( + "archive/tar" + "fmt" + "math/bits" + "os" + "strings" + "syscall" + + "github.com/containers/storage/pkg/system" +) + +const ( + paxSCHILYFflags = "SCHILY.fflags" +) + +var ( + flagNameToValue = map[string]uint32{ + "sappnd": system.SF_APPEND, + "sappend": system.SF_APPEND, + "arch": system.SF_ARCHIVED, + "archived": system.SF_ARCHIVED, + "schg": system.SF_IMMUTABLE, + "schange": system.SF_IMMUTABLE, + "simmutable": system.SF_IMMUTABLE, + "sunlnk": system.SF_NOUNLINK, + "sunlink": system.SF_NOUNLINK, + "snapshot": system.SF_SNAPSHOT, + "uappnd": system.UF_APPEND, + "uappend": system.UF_APPEND, + "uarch": system.UF_ARCHIVE, + "uarchive": system.UF_ARCHIVE, + "hidden": system.UF_HIDDEN, + "uhidden": system.UF_HIDDEN, + "uchg": system.UF_IMMUTABLE, + "uchange": system.UF_IMMUTABLE, + "uimmutable": system.UF_IMMUTABLE, + "uunlnk": system.UF_NOUNLINK, + "uunlink": system.UF_NOUNLINK, + "offline": system.UF_OFFLINE, + "uoffline": system.UF_OFFLINE, + "opaque": system.UF_OPAQUE, + "rdonly": system.UF_READONLY, + "urdonly": system.UF_READONLY, + "readonly": system.UF_READONLY, + "ureadonly": system.UF_READONLY, + "reparse": system.UF_REPARSE, + "ureparse": system.UF_REPARSE, + "sparse": system.UF_SPARSE, + "usparse": system.UF_SPARSE, + "system": system.UF_SYSTEM, + "usystem": system.UF_SYSTEM, + } + // Only include the short names for the reverse map + flagValueToName = map[uint32]string{ + system.SF_APPEND: "sappnd", + system.SF_ARCHIVED: "arch", + system.SF_IMMUTABLE: "schg", + system.SF_NOUNLINK: "sunlnk", + system.SF_SNAPSHOT: "snapshot", + system.UF_APPEND: "uappnd", + system.UF_ARCHIVE: "uarch", + system.UF_HIDDEN: "hidden", + system.UF_IMMUTABLE: "uchg", + system.UF_NOUNLINK: "uunlnk", + system.UF_OFFLINE: "offline", + system.UF_OPAQUE: "opaque", + system.UF_READONLY: "rdonly", + system.UF_REPARSE: "reparse", + system.UF_SPARSE: "sparse", + system.UF_SYSTEM: "system", + } +) + +func parseFileFlags(fflags string) (uint32, uint32, error) { + var set, clear uint32 = 0, 0 + for _, fflag := range strings.Split(fflags, ",") { + isClear := false + if strings.HasPrefix(fflag, "no") { + isClear = true + fflag = strings.TrimPrefix(fflag, "no") + } + if value, ok := flagNameToValue[fflag]; ok { + if isClear { + clear |= value 
+ } else { + set |= value + } + } else { + return 0, 0, fmt.Errorf("parsing file flags, unrecognised token: %s", fflag) + } + } + return set, clear, nil +} + +func formatFileFlags(fflags uint32) (string, error) { + var res = []string{} + for fflags != 0 { + // Extract lowest set bit + fflag := uint32(1) << bits.TrailingZeros32(fflags) + if name, ok := flagValueToName[fflag]; ok { + res = append(res, name) + } else { + return "", fmt.Errorf("formatting file flags, unrecognised flag: %x", fflag) + } + fflags &= ^fflag + } + return strings.Join(res, ","), nil +} + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + st, err := system.Lstat(path) + if err != nil { + return err + } + fflags, err := formatFileFlags(st.Flags()) + if err != nil { + return err + } + if fflags != "" { + if hdr.PAXRecords == nil { + hdr.PAXRecords = map[string]string{} + } + hdr.PAXRecords[paxSCHILYFflags] = fflags + } + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + if fflags, ok := hdr.PAXRecords[paxSCHILYFflags]; ok { + var set, clear uint32 + set, clear, err := parseFileFlags(fflags) + if err != nil { + return err + } + + // Apply the delta to the existing file flags + st, err := system.Lstat(path) + if err != nil { + return err + } + return system.Lchflags(path, (st.Flags() & ^clear)|set) + } + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + var flags uint32 + if fi != nil { + flags = (*fi).Sys().(*syscall.Stat_t).Flags + } else { + st, err := system.Lstat(path) + if err != nil { + return err + } + flags = st.Flags() + } + if flags&(system.SF_IMMUTABLE|system.UF_IMMUTABLE) != 0 { + flags &= ^(system.SF_IMMUTABLE | system.UF_IMMUTABLE) + return system.Lchflags(path, flags) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go new file mode 100644 index 00000000000..27a0bae2b77 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go @@ -0,0 +1,21 @@ +//go:build !freebsd +// +build !freebsd + +package archive + +import ( + "archive/tar" + "os" +) + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go index e85aac05408..8db64e80429 100644 --- a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package archive diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index 2232f5d4af4..2de95f39a8d 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -4,14 +4,13 @@ import ( stdtar "archive/tar" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" - "github.com/opencontainers/runc/libcontainer/userns" + "github.com/containers/storage/pkg/unshare" ) // NewArchiver returns a new Archiver which uses 
chrootarchive.Untar @@ -31,7 +30,8 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools. // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. +// +// identity (uncompressed), gzip, bzip2, xz. func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { return untarHandler(tarArchive, dest, options, true, dest) } @@ -66,7 +66,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions } if options == nil { options = &archive.TarOptions{} - options.InUserNS = userns.RunningInUserNS() + options.InUserNS = unshare.IsRootless() } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} @@ -82,7 +82,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions } } - r := ioutil.NopCloser(tarArchive) + r := tarArchive if decompress { decompressedArchive, err := archive.DecompressStream(tarArchive) if err != nil { @@ -136,7 +136,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) } hashWorker.Wait() - if err == nil { + if err == nil && hashError != nil { err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError) } return err diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go index d257cc8e942..42ee39f48f5 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go @@ -6,11 +6,7 @@ import ( "github.com/containers/storage/pkg/archive" ) -func chroot(path string) error { - return nil -} - -func invokeUnpack(decompressedArchive io.ReadCloser, +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error { return archive.Unpack(decompressedArchive, dest, options) diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go index 2d64c280065..8cc0f33b31c 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -9,7 +9,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -111,7 +110,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T // when `xz -d -c -q | storage-untar ...` failed on storage-untar side, // we need to exhaust `xz`'s output, otherwise the `xz` side will be // pending on write pipe forever - io.Copy(ioutil.Discard, decompressedArchive) + io.Copy(io.Discard, decompressedArchive) return fmt.Errorf("processing tar file(%s): %w", output, err) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go index 8a5c680b146..1395ff8cd7b 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -12,7 +12,7 @@ func chroot(path string) error { return nil } -func invokeUnpack(decompressedArchive io.ReadCloser, +func 
invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error { // Windows is different to Linux here because Windows does not support diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go index 255882174c9..09ef6d5de45 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -2,7 +2,6 @@ package chrootarchive import ( "fmt" - "io/ioutil" "net" "os" "os/user" @@ -51,7 +50,7 @@ func chroot(path string) (err error) { } // setup oldRoot for pivot_root - pivotDir, err := ioutil.TempDir(path, ".pivot_root") + pivotDir, err := os.MkdirTemp(path, ".pivot_root") if err != nil { return fmt.Errorf("setting up pivot dir: %w", err) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go index d5aedd002e1..b03e97460b4 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -1,3 +1,4 @@ +//go:build !windows && !linux && !darwin // +build !windows,!linux,!darwin package chrootarchive diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go index d6326c808e2..7b4ea9e51c6 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go @@ -3,7 +3,6 @@ package chrootarchive import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -26,15 +25,15 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions layer = decompressed } - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract") + tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract") if err != nil { - return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. 
%s", dest, err) + return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s: %w", dest, err) } s, err := archive.UnpackLayer(dest, layer, options) os.RemoveAll(tmpDir) if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %w", layer, dest, err) } return s, nil diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go index 3ebee94969b..90f453913e5 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -8,7 +8,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -16,7 +15,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/system" - "github.com/opencontainers/runc/libcontainer/userns" + "github.com/containers/storage/pkg/unshare" ) type applyLayerResponse struct { @@ -36,7 +35,7 @@ func applyLayer() { runtime.LockOSThread() flag.Parse() - inUserns := userns.RunningInUserNS() + inUserns := unshare.IsRootless() if err := chroot(flag.Arg(0)); err != nil { fatal(err) } @@ -56,7 +55,7 @@ func applyLayer() { options.InUserNS = true } - if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil { + if tmpDir, err = os.MkdirTemp("/", "temp-storage-extract"); err != nil { fatal(err) } @@ -95,7 +94,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions } if options == nil { options = &archive.TarOptions{} - if userns.RunningInUserNS() { + if unshare.IsRootless() { options.InUserNS = true } } @@ -110,7 +109,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions cmd := reexec.Command("storage-applyLayer", dest) cmd.Stdin = layer - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + cmd.Env = append(os.Environ(), fmt.Sprintf("OPT=%s", data)) outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) cmd.Stdout, cmd.Stderr = outBuf, errBuf diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go index 8f8e88bfbea..8bfff5d65ba 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go @@ -3,7 +3,6 @@ package chrootarchive import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -30,7 +29,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions layer = decompressed } - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract") + tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract") if err != nil { return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. 
%s", dest, err) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go index 45caec97225..274a946e2fb 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -1,3 +1,4 @@ +//go:build !windows && !darwin // +build !windows,!darwin package chrootarchive @@ -5,7 +6,6 @@ package chrootarchive import ( "fmt" "io" - "io/ioutil" "os" "github.com/containers/storage/pkg/reexec" @@ -25,5 +25,5 @@ func fatal(err error) { // flush consumes all the bytes from the reader discarding // any errors func flush(r io.Reader) (bytes int64, err error) { - return io.Copy(ioutil.Discard, r) + return io.Copy(io.Discard, r) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go index 63f9704564c..40eec4dc0a8 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go @@ -1,3 +1,6 @@ +//go:build !windows && !darwin +// +build !windows,!darwin + package chrootarchive import jsoniter "github.com/json-iterator/go" diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index c88091393b0..7279567999a 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "sort" "strconv" @@ -128,7 +127,7 @@ func (c *layersCache) load() error { } defer manifestReader.Close() - manifest, err := ioutil.ReadAll(manifestReader) + manifest, err := io.ReadAll(manifestReader) if err != nil { return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) } @@ -334,7 +333,7 @@ func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) }() defer pipeReader.Close() - counter := ioutils.NewWriteCounter(ioutil.Discard) + counter := ioutils.NewWriteCounter(io.Discard) r := io.TeeReader(pipeReader, counter) diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go index 8d4d3c4a74e..e828d479f2b 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go @@ -1,21 +1,10 @@ package chunked import ( - archivetar "archive/tar" - "bytes" - "encoding/binary" - "errors" - "fmt" "io" - "strconv" - "github.com/containerd/stargz-snapshotter/estargz" "github.com/containers/storage/pkg/chunked/compressor" "github.com/containers/storage/pkg/chunked/internal" - "github.com/klauspost/compress/zstd" - "github.com/klauspost/pgzip" - digest "github.com/opencontainers/go-digest" - "github.com/vbatts/tar-split/archive/tar" ) const ( @@ -29,247 +18,6 @@ const ( TypeSymlink = internal.TypeSymlink ) -var typesToTar = map[string]byte{ - TypeReg: tar.TypeReg, - TypeLink: tar.TypeLink, - TypeChar: tar.TypeChar, - TypeBlock: tar.TypeBlock, - TypeDir: tar.TypeDir, - TypeFifo: tar.TypeFifo, - TypeSymlink: tar.TypeSymlink, -} - -func typeToTarType(t string) (byte, error) { - r, found := typesToTar[t] - if !found { - return 0, fmt.Errorf("unknown type: %v", t) - } - return r, nil -} - -func isZstdChunkedFrameMagic(data []byte) bool { - if len(data) < 8 { - return false 
- } - return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8]) -} - -func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { - // information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md - footerSize := int64(51) - if blobSize <= footerSize { - return nil, 0, errors.New("blob too small") - } - chunk := ImageSourceChunk{ - Offset: uint64(blobSize - footerSize), - Length: uint64(footerSize), - } - parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) - if err != nil { - return nil, 0, err - } - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, 0, err - } - defer reader.Close() - footer := make([]byte, footerSize) - if _, err := io.ReadFull(reader, footer); err != nil { - return nil, 0, err - } - - /* Read the ToC offset: - - 10 bytes gzip header - - 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) - - 2 bytes Extra: SI1 = 'S', SI2 = 'G' - - 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) - - 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) - - 5 bytes flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0 - - 8 bytes gzip footer - */ - tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64) - if err != nil { - return nil, 0, fmt.Errorf("parse ToC offset: %w", err) - } - - size := int64(blobSize - footerSize - tocOffset) - // set a reasonable limit - if size > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") - } - - chunk = ImageSourceChunk{ - Offset: uint64(tocOffset), - Length: uint64(size), - } - parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk}) - if err != nil { - return nil, 0, err - } - - var tocReader io.ReadCloser - select { - case r := <-parts: - tocReader = r - case err := <-errs: - return nil, 0, err - } - defer tocReader.Close() - - r, err := pgzip.NewReader(tocReader) - if err != nil { - return nil, 0, err - } - defer r.Close() - - aTar := archivetar.NewReader(r) - - header, err := aTar.Next() - if err != nil { - return nil, 0, err - } - // set a reasonable limit - if header.Size > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") - } - - manifestUncompressed := make([]byte, header.Size) - if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { - return nil, 0, err - } - - manifestDigester := digest.Canonical.Digester() - manifestChecksum := manifestDigester.Hash() - if _, err := manifestChecksum.Write(manifestUncompressed); err != nil { - return nil, 0, err - } - - d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) - if err != nil { - return nil, 0, err - } - if manifestDigester.Digest() != d { - return nil, 0, errors.New("invalid manifest checksum") - } - - return manifestUncompressed, tocOffset, nil -} - -// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must -// be specified. -// This function uses the io.containers.zstd-chunked. annotations when specified. 
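The chunked-manifest readers deleted above reappear below in a new linux-only file, and the bump also re-keys the annotations from the io.containers.zstd-chunked.* prefix to io.github.containers.zstd-chunked.*. As a rough standalone sketch of the wire format those readers consume (the encode/decode function names here are illustrative, not part of the vendored code), the manifest-position annotation packs offset, compressed length, uncompressed length, and manifest type into one colon-separated string:

package main

import "fmt"

// encodeManifestInfo mirrors the "%d:%d:%d:%d" layout scanned by
// readZstdChunkedManifest for the manifest-position annotation.
func encodeManifestInfo(offset, length, lengthUncompressed, manifestType uint64) string {
	return fmt.Sprintf("%d:%d:%d:%d", offset, length, lengthUncompressed, manifestType)
}

// decodeManifestInfo reverses encodeManifestInfo using the same format string.
func decodeManifestInfo(s string) (offset, length, lengthUncompressed, manifestType uint64, err error) {
	_, err = fmt.Sscanf(s, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType)
	return
}

func main() {
	const key = "io.github.containers.zstd-chunked.manifest-position"
	v := encodeManifestInfo(1024, 512, 2048, 1) // 1 == ManifestTypeCRFS
	fmt.Println(key, "=", v)

	offset, length, uncompressed, typ, err := decodeManifestInfo(v)
	fmt.Println(offset, length, uncompressed, typ, err)
}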
-func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { - footerSize := int64(internal.FooterSizeSupported) - if blobSize <= footerSize { - return nil, 0, errors.New("blob too small") - } - - manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey] - if manifestChecksumAnnotation == "" { - return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey) - } - - var offset, length, lengthUncompressed, manifestType uint64 - - if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" { - if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil { - return nil, 0, err - } - } else { - chunk := ImageSourceChunk{ - Offset: uint64(blobSize - footerSize), - Length: uint64(footerSize), - } - parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) - if err != nil { - return nil, 0, err - } - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, 0, err - } - footer := make([]byte, footerSize) - if _, err := io.ReadFull(reader, footer); err != nil { - return nil, 0, err - } - - offset = binary.LittleEndian.Uint64(footer[0:8]) - length = binary.LittleEndian.Uint64(footer[8:16]) - lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) - manifestType = binary.LittleEndian.Uint64(footer[24:32]) - if !isZstdChunkedFrameMagic(footer[32:40]) { - return nil, 0, errors.New("invalid magic number") - } - } - - if manifestType != internal.ManifestTypeCRFS { - return nil, 0, errors.New("invalid manifest type") - } - - // set a reasonable limit - if length > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") - } - if lengthUncompressed > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") - } - - chunk := ImageSourceChunk{ - Offset: offset, - Length: length, - } - - parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) - if err != nil { - return nil, 0, err - } - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, 0, err - } - - manifest := make([]byte, length) - if _, err := io.ReadFull(reader, manifest); err != nil { - return nil, 0, err - } - - manifestDigester := digest.Canonical.Digester() - manifestChecksum := manifestDigester.Hash() - if _, err := manifestChecksum.Write(manifest); err != nil { - return nil, 0, err - } - - d, err := digest.Parse(manifestChecksumAnnotation) - if err != nil { - return nil, 0, err - } - if manifestDigester.Digest() != d { - return nil, 0, errors.New("invalid manifest checksum") - } - - decoder, err := zstd.NewReader(nil) - if err != nil { - return nil, 0, err - } - defer decoder.Close() - - b := make([]byte, 0, lengthUncompressed) - if decoded, err := decoder.DecodeAll(manifest, b); err == nil { - return decoded, int64(offset), nil - } - - return manifest, int64(offset), nil -} - // ZstdCompressor is a CompressorFunc for the zstd compression algorithm. // Deprecated: Use pkg/chunked/compressor.ZstdCompressor. 
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go new file mode 100644 index 00000000000..f6c7b77b64e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -0,0 +1,259 @@ +package chunked + +import ( + archivetar "archive/tar" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "strconv" + + "github.com/containerd/stargz-snapshotter/estargz" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/klauspost/compress/zstd" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/archive/tar" +) + +var typesToTar = map[string]byte{ + TypeReg: tar.TypeReg, + TypeLink: tar.TypeLink, + TypeChar: tar.TypeChar, + TypeBlock: tar.TypeBlock, + TypeDir: tar.TypeDir, + TypeFifo: tar.TypeFifo, + TypeSymlink: tar.TypeSymlink, +} + +func typeToTarType(t string) (byte, error) { + r, found := typesToTar[t] + if !found { + return 0, fmt.Errorf("unknown type: %v", t) + } + return r, nil +} + +func isZstdChunkedFrameMagic(data []byte) bool { + if len(data) < 8 { + return false + } + return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8]) +} + +func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { + // information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md + footerSize := int64(51) + if blobSize <= footerSize { + return nil, 0, errors.New("blob too small") + } + chunk := ImageSourceChunk{ + Offset: uint64(blobSize - footerSize), + Length: uint64(footerSize), + } + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, 0, err + } + defer reader.Close() + footer := make([]byte, footerSize) + if _, err := io.ReadFull(reader, footer); err != nil { + return nil, 0, err + } + + /* Read the ToC offset: + - 10 bytes gzip header + - 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + - 2 bytes Extra: SI1 = 'S', SI2 = 'G' + - 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + - 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + - 5 bytes flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0 + - 8 bytes gzip footer + */ + tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64) + if err != nil { + return nil, 0, fmt.Errorf("parse ToC offset: %w", err) + } + + size := int64(blobSize - footerSize - tocOffset) + // set a reasonable limit + if size > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + chunk = ImageSourceChunk{ + Offset: uint64(tocOffset), + Length: uint64(size), + } + parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + + var tocReader io.ReadCloser + select { + case r := <-parts: + tocReader = r + case err := <-errs: + return nil, 0, err + } + defer tocReader.Close() + + r, err := pgzip.NewReader(tocReader) + if err != nil { + return nil, 0, err + } + defer r.Close() + + aTar := archivetar.NewReader(r) + + header, err := aTar.Next() + if err != nil { + return nil, 0, err + } + // set a reasonable limit + if header.Size 
> (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + manifestUncompressed := make([]byte, header.Size) + if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { + return nil, 0, err + } + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(manifestUncompressed); err != nil { + return nil, 0, err + } + + d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + if err != nil { + return nil, 0, err + } + if manifestDigester.Digest() != d { + return nil, 0, errors.New("invalid manifest checksum") + } + + return manifestUncompressed, tocOffset, nil +} + +// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must +// be specified. +// This function uses the io.github.containers.zstd-chunked. annotations when specified. +func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { + footerSize := int64(internal.FooterSizeSupported) + if blobSize <= footerSize { + return nil, 0, errors.New("blob too small") + } + + manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey] + if manifestChecksumAnnotation == "" { + return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey) + } + + var offset, length, lengthUncompressed, manifestType uint64 + + if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" { + if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil { + return nil, 0, err + } + } else { + chunk := ImageSourceChunk{ + Offset: uint64(blobSize - footerSize), + Length: uint64(footerSize), + } + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, 0, err + } + footer := make([]byte, footerSize) + if _, err := io.ReadFull(reader, footer); err != nil { + return nil, 0, err + } + + offset = binary.LittleEndian.Uint64(footer[0:8]) + length = binary.LittleEndian.Uint64(footer[8:16]) + lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) + manifestType = binary.LittleEndian.Uint64(footer[24:32]) + if !isZstdChunkedFrameMagic(footer[32:40]) { + return nil, 0, errors.New("invalid magic number") + } + } + + if manifestType != internal.ManifestTypeCRFS { + return nil, 0, errors.New("invalid manifest type") + } + + // set a reasonable limit + if length > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + if lengthUncompressed > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + chunk := ImageSourceChunk{ + Offset: offset, + Length: length, + } + + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, 0, err + } + + manifest := make([]byte, length) + if _, err := io.ReadFull(reader, manifest); err != nil { + return nil, 0, err + } + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(manifest); err != nil { + return nil, 0, err + } + + d, err := digest.Parse(manifestChecksumAnnotation) + if err != nil { + return nil, 0, err + } + if manifestDigester.Digest() != d 
{ + return nil, 0, errors.New("invalid manifest checksum") + } + + decoder, err := zstd.NewReader(nil) + if err != nil { + return nil, 0, err + } + defer decoder.Close() + + b := make([]byte, 0, lengthUncompressed) + if decoded, err := decoder.DecodeAll(manifest, b); err == nil { + return decoded, int64(offset), nil + } + + return manifest, int64(offset), nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go index aeb7cfd4f01..2a9bdc67565 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go @@ -8,7 +8,6 @@ import ( "bufio" "encoding/base64" "io" - "io/ioutil" "github.com/containers/storage/pkg/chunked/internal" "github.com/containers/storage/pkg/ioutils" @@ -21,9 +20,7 @@ const holesThreshold = int64(1 << 10) type holesFinder struct { reader *bufio.Reader - fileOff int64 zeros int64 - from int64 threshold int64 state int @@ -36,11 +33,11 @@ const ( holesFinderStateEOF ) -// ReadByte reads a single byte from the underlying reader. +// readByte reads a single byte from the underlying reader. // If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil). // If there are at least f.THRESHOLD consecutive zeros, then the // return value is (N_CONSECUTIVE_ZEROS, '\x00'). -func (f *holesFinder) ReadByte() (int64, byte, error) { +func (f *holesFinder) readByte() (int64, byte, error) { for { switch f.state { // reading the file stream @@ -81,7 +78,7 @@ func (f *holesFinder) ReadByte() (int64, byte, error) { f.state = holesFinderStateFound } } else { - if f.reader.UnreadByte(); err != nil { + if err := f.reader.UnreadByte(); err != nil { return 0, 0, err } f.state = holesFinderStateRead @@ -98,7 +95,7 @@ func (f *holesFinder) ReadByte() (int64, byte, error) { return holeLen, 0, nil } if b != 0 { - if f.reader.UnreadByte(); err != nil { + if err := f.reader.UnreadByte(); err != nil { return 0, 0, err } f.state = holesFinderStateRead @@ -162,7 +159,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { } for i := 0; i < len(b); i++ { - holeLen, n, err := rc.reader.ReadByte() + holeLen, n, err := rc.reader.readByte() if err != nil { if err == io.EOF { rc.closed = true @@ -432,7 +429,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level go func() { ch <- writeZstdChunkedStream(out, metadata, r, level) - io.Copy(ioutil.Discard, r) + _, _ = io.Copy(io.Discard, r) // Ordinarily writeZstdChunkedStream consumes all of r. If it fails, ensure the write end never blocks and eventually terminates. r.Close() close(ch) }() diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index 3bb5286d92d..092b03533bf 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -88,8 +88,8 @@ func GetType(t byte) (string, error) { } const ( - ManifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum" - ManifestInfoKey = "io.containers.zstd-chunked.manifest-position" + ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum" + ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position" // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file. 
ManifestTypeCRFS = 1 @@ -114,7 +114,7 @@ func appendZstdSkippableFrame(dest io.Writer, data []byte) error { return err } - var size []byte = make([]byte, 4) + size := make([]byte, 4) binary.LittleEndian.PutUint32(size, uint32(len(data))) if _, err := dest.Write(size); err != nil { return err @@ -168,7 +168,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off } // Store the offset to the manifest and its size in LE order - var manifestDataLE []byte = make([]byte, FooterSizeSupported) + manifestDataLE := make([]byte, FooterSizeSupported) binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset) binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest))) binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest))) diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage.go b/vendor/github.com/containers/storage/pkg/chunked/storage.go index 9212cbbcff8..752ee252003 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage.go @@ -1,7 +1,6 @@ package chunked import ( - "fmt" "io" ) @@ -18,9 +17,9 @@ type ImageSourceSeekable interface { } // ErrBadRequest is returned when the request is not valid -type ErrBadRequest struct { +type ErrBadRequest struct { //nolint: errname } func (e ErrBadRequest) Error() string { - return fmt.Sprintf("bad request") + return "bad request" } diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 7278f2d886f..83d6e2f88c9 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -8,7 +8,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "os" "path/filepath" "reflect" @@ -657,7 +656,7 @@ func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressed // Only the missing chunk in the requested part refers to a hole. // The received data must be discarded. 
limitReader := io.LimitReader(from, mf.CompressedSize) - _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) + _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer) return fileTypeHole, err case partCompression == fileTypeZstdChunked: c.rawReader = io.LimitReader(from, mf.CompressedSize) @@ -856,7 +855,7 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan for _, mf := range missingPart.Chunks { if mf.Gap > 0 { limitReader := io.LimitReader(part, mf.Gap) - _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) + _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer) if err != nil { Err = err goto exit @@ -906,7 +905,7 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan goto exit } if c.rawReader != nil { - if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil { + if _, err := io.CopyBuffer(io.Discard, c.rawReader, c.copyBuffer); err != nil { Err = err goto exit } diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go index 082fb1ba329..ead15c14864 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go index 190d83d4999..7baca812632 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go index 7f793c27086..071f7f35b1d 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -1,3 +1,4 @@ +//go:build linux && cgo && !libdm_no_deferred_remove // +build linux,cgo,!libdm_no_deferred_remove package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go index 7d84508982d..93dcc322197 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go @@ -1,3 +1,4 @@ +//go:build linux && cgo && !static_build // +build linux,cgo,!static_build package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go index a880fec8c49..91906f2efef 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -1,3 +1,4 @@ +//go:build linux && cgo && libdm_no_deferred_remove // +build linux,cgo,libdm_no_deferred_remove package devicemapper diff --git 
a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go index cf7f26a4c67..68ea48fe572 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go @@ -1,3 +1,4 @@ +//go:build linux && cgo && static_build // +build linux,cgo,static_build package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go index 50ea7c48238..90ffe2c3fd3 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go index b0ce706e5ed..829fe59f3ae 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory.go @@ -1,7 +1,6 @@ package directory import ( - "io/ioutil" "os" "path/filepath" ) @@ -15,7 +14,7 @@ type DiskUsage struct { // MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path func MoveToSubdir(oldpath, subdir string) error { - infos, err := ioutil.ReadDir(oldpath) + infos, err := os.ReadDir(oldpath) if err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go index 7df7f3d4364..e883d25f51c 100644 --- a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go +++ b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dmesg diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go index abf6e2f85ef..e3cef48a30f 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -321,14 +321,14 @@ func ReadSymlinkedDirectory(path string) (string, error) { var realPath string var err error if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + return "", fmt.Errorf("unable to get absolute path for %s: %w", path, err) } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + return "", fmt.Errorf("failed to canonicalise path for %s: %w", path, err) } realPathInfo, err := os.Stat(realPath) if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + return "", fmt.Errorf("failed to stat target '%s' of '%s': %w", realPath, path, err) } if !realPathInfo.Mode().IsDir() { return "", fmt.Errorf("canonical path points to a file '%s'", realPath) diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go index 92056c1d5f6..92e0263d818 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go @@ -1,10 +1,10 @@ +//go:build linux || freebsd 
// +build linux freebsd package fileutils import ( "fmt" - "io/ioutil" "os" "github.com/sirupsen/logrus" @@ -13,7 +13,7 @@ import ( // GetTotalUsedFds Returns the number of used File Descriptors by // reading it via /proc filesystem. func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { logrus.Errorf("%v", err) } else { return len(fds) diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go index e6094b55b71..9854cac1c29 100644 --- a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go +++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go @@ -1,10 +1,10 @@ +//go:build linux // +build linux package fsutils import ( "fmt" - "io/ioutil" "os" "unsafe" @@ -12,14 +12,14 @@ import ( ) func locateDummyIfEmpty(path string) (string, error) { - children, err := ioutil.ReadDir(path) + children, err := os.ReadDir(path) if err != nil { return "", err } if len(children) != 0 { return "", nil } - dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + dummyFile, err := os.CreateTemp(path, "fsutils-dummy") if err != nil { return "", err } diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index 027db259c19..0883ee02304 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -1,3 +1,4 @@ +//go:build !linux && !darwin && !freebsd // +build !linux,!darwin,!freebsd package homedir diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 33177bdf306..9976f19af41 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package homedir @@ -46,7 +47,7 @@ func GetShortcutString() string { // See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html func GetRuntimeDir() (string, error) { if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { - return xdgRuntimeDir, nil + return filepath.EvalSymlinks(xdgRuntimeDir) } return "", errors.New("could not get XDG_RUNTIME_DIR") } @@ -62,7 +63,7 @@ func StickRuntimeDirContents(files []string) ([]string, error) { runtimeDir, err := GetRuntimeDir() if err != nil { // ignore error if runtimeDir is empty - return nil, nil + return nil, nil //nolint: nilerr } runtimeDir, err = filepath.Abs(runtimeDir) if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index a7f4eaf1302..4cec8b263c5 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -2,8 +2,8 @@ package idtools import ( "bufio" + "errors" "fmt" - "io/ioutil" "os" "os/user" "runtime" @@ -219,7 +219,7 @@ func getOverflowUID() int { overflowUIDOnce.Do(func() { // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present overflowUID = 65534 - if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowuid"); err == nil { + if content, err := os.ReadFile("/proc/sys/kernel/overflowuid"); 
err == nil { if tmp, err := strconv.Atoi(string(content)); err == nil { overflowUID = tmp } @@ -233,7 +233,7 @@ func getOverflowGID() int { overflowGIDOnce.Do(func() { // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present overflowGID = 65534 - if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowgid"); err == nil { + if content, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil { if tmp, err := strconv.Atoi(string(content)); err == nil { overflowGID = tmp } @@ -360,7 +360,8 @@ func parseSubidFile(path, username string) (ranges, error) { } func checkChownErr(err error, name string, uid, gid int) error { - if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL { + var e *os.PathError + if errors.As(err, &e) && e.Err == syscall.EINVAL { return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err) } return err diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go index 84da1b764bf..78141fb8592 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux || !libsubid || !cgo // +build !linux !libsubid !cgo package idtools diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go index 16be94f446f..dc69c60764b 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package idtools diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go index a467f41c356..40e507f7799 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -2,11 +2,12 @@ package idtools import ( "fmt" - "regexp" "sort" "strconv" "strings" "sync" + + "github.com/containers/storage/pkg/regexp" ) // add a user and/or group to Linux /etc/passwd, /etc/group using standard @@ -24,7 +25,7 @@ var ( "usermod": "-%s %d-%d %s", } - idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + idOutRegexp = regexp.Delayed(`uid=([0-9]+).*gid=([0-9]+)`) // default length for a UID/GID subordinate range defaultRangeLen = 65536 defaultRangeStart = 100000 diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go index cd12470f9da..231d1c47b27 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -2,9 +2,9 @@ package ioutils import ( "io" - "io/ioutil" "os" "path/filepath" + "time" ) // AtomicFileWriterOptions specifies options for creating the atomic file writer. @@ -14,9 +14,12 @@ type AtomicFileWriterOptions struct { // storage after it has been written and before it is moved to // the specified path. NoSync bool + // On successful return from Close() this is set to the mtime of the + // newly written file. 
+	ModTime time.Time
 }
 
-var defaultWriterOptions AtomicFileWriterOptions = AtomicFileWriterOptions{}
+var defaultWriterOptions = AtomicFileWriterOptions{}
 
 // SetDefaultOptions overrides the default options used when creating an
 // atomic file writer.
@@ -28,7 +31,14 @@ func SetDefaultOptions(opts AtomicFileWriterOptions) {
 // temporary file and closing it atomically changes the temporary file to
 // destination path. Writing and closing concurrently is not allowed.
 func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (io.WriteCloser, error) {
-	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+	return newAtomicFileWriter(filename, perm, opts)
+}
+
+// newAtomicFileWriter returns WriteCloser so that writing to it writes to a
+// temporary file and closing it atomically changes the temporary file to
+// destination path. Writing and closing concurrently is not allowed.
+func newAtomicFileWriter(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (*atomicFileWriter, error) {
+	f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
 	if err != nil {
 		return nil, err
 	}
@@ -55,28 +65,38 @@ func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, err
 }
 
 // AtomicWriteFile atomically writes data to a file named by filename.
-func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
-	f, err := NewAtomicFileWriter(filename, perm)
+func AtomicWriteFileWithOpts(filename string, data []byte, perm os.FileMode, opts *AtomicFileWriterOptions) error {
+	f, err := newAtomicFileWriter(filename, perm, opts)
 	if err != nil {
 		return err
 	}
 	n, err := f.Write(data)
 	if err == nil && n < len(data) {
 		err = io.ErrShortWrite
-		f.(*atomicFileWriter).writeErr = err
+		f.writeErr = err
 	}
 	if err1 := f.Close(); err == nil {
 		err = err1
 	}
+
+	if opts != nil {
+		opts.ModTime = f.modTime
+	}
+
 	return err
 }
 
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+	return AtomicWriteFileWithOpts(filename, data, perm, nil)
+}
+
 type atomicFileWriter struct {
 	f        *os.File
 	fn       string
 	writeErr error
 	perm     os.FileMode
 	noSync   bool
+	modTime  time.Time
 }
 
 func (w *atomicFileWriter) Write(dt []byte) (int, error) {
@@ -99,9 +119,25 @@ func (w *atomicFileWriter) Close() (retErr error) {
 			return err
 		}
 	}
+
+	// fstat before closing the fd
+	info, statErr := w.f.Stat()
+	if statErr == nil {
+		w.modTime = info.ModTime()
+	}
+	// We delay error reporting until after the real call to close()
+	// to match the traditional linux close() behaviour that an fd
+	// is invalid (closed) even if close returns failure. While
+	// weird, this allows a well defined way to not leak open fds.
 	if err := w.f.Close(); err != nil {
 		return err
 	}
+
+	if statErr != nil {
+		return statErr
+	}
+
 	if err := os.Chmod(w.f.Name(), w.perm); err != nil {
 		return err
 	}
@@ -124,7 +160,7 @@ type AtomicWriteSet struct {
 // commit. If no temporary directory is given the system
 // default is used.
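Since Close() now records the written file's mtime, callers of the new AtomicWriteFileWithOpts entry point can read it back from the options struct after a successful write. A minimal usage sketch under that API (the path and payload are hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	opts := ioutils.AtomicFileWriterOptions{}
	// On success the temporary file has been committed to the destination
	// path, and opts.ModTime carries the mtime of the newly written file.
	if err := ioutils.AtomicWriteFileWithOpts("/tmp/example.json", []byte(`{"a":1}`), 0o600, &opts); err != nil {
		log.Fatal(err)
	}
	fmt.Println("committed at:", opts.ModTime)
}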
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := ioutil.TempDir(tmpDir, "write-set-") + td, err := os.MkdirTemp(tmpDir, "write-set-") if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go index 79a094035db..635489280d3 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package ioutils diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go index 1539ad21b57..9d5af610e09 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go @@ -1,10 +1,11 @@ +//go:build !windows // +build !windows package ioutils -import "io/ioutil" +import "os" -// TempDir on Unix systems is equivalent to ioutil.TempDir. +// TempDir on Unix systems is equivalent to os.MkdirTemp. func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) + return os.MkdirTemp(dir, prefix) } diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go index c719c120b5f..2c2242d69d4 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go @@ -1,16 +1,17 @@ +//go:build windows // +build windows package ioutils import ( - "io/ioutil" + "os" "github.com/containers/storage/pkg/longpath" ) -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format. func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) + tempDir, err := os.MkdirTemp(dir, prefix) if err != nil { return "", err } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go index d3f4df0985c..ec25f8a9cfe 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go @@ -10,6 +10,8 @@ import ( // A Locker represents a file lock where the file is used to cache an // identifier of the last party that made changes to whatever's being protected // by the lock. +// +// Deprecated: Refer directly to *LockFile, the provided implementation, instead. type Locker interface { // Acquire a writer lock. // The default unix implementation panics if: @@ -17,10 +19,6 @@ type Locker interface { // - tried to lock a read-only lock-file Lock() - // Acquire a writer lock recursively, allowing for recursive acquisitions - // within the same process space. - RecursiveLock() - // Unlock the lock. // The default unix implementation panics if: // - unlocking an unlocked lock @@ -32,10 +30,13 @@ type Locker interface { // Touch records, for others sharing the lock, that the caller was the // last writer. It should only be called with the lock held. + // + // Deprecated: Use *LockFile.RecordWrite. Touch() error // Modified() checks if the most recent writer was a party other than the // last recorded writer. It should only be called with the lock held. 
+ // Deprecated: Use *LockFile.ModifiedSince. Modified() (bool, error) // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time. @@ -44,63 +45,86 @@ type Locker interface { // IsReadWrite() checks if the lock file is read-write IsReadWrite() bool - // Locked() checks if lock is locked for writing by a thread in this process - Locked() bool + // AssertLocked() can be used by callers that _know_ that they hold the lock (for reading or writing), for sanity checking. + // It might do nothing at all, or it may panic if the caller is not the owner of this lock. + AssertLocked() + + // AssertLockedForWriting() can be used by callers that _know_ that they hold the lock locked for writing, for sanity checking. + // It might do nothing at all, or it may panic if the caller is not the owner of this lock for writing. + AssertLockedForWriting() } var ( - lockfiles map[string]Locker - lockfilesLock sync.Mutex + lockFiles map[string]*LockFile + lockFilesLock sync.Mutex ) +// GetLockFile opens a read-write lock file, creating it if necessary. The +// *LockFile object may already be locked if the path has already been requested +// by the current process. +func GetLockFile(path string) (*LockFile, error) { + return getLockfile(path, false) +} + // GetLockfile opens a read-write lock file, creating it if necessary. The // Locker object may already be locked if the path has already been requested // by the current process. +// +// Deprecated: Use GetLockFile func GetLockfile(path string) (Locker, error) { - return getLockfile(path, false) + return GetLockFile(path) +} + +// GetROLockFile opens a read-only lock file, creating it if necessary. The +// *LockFile object may already be locked if the path has already been requested +// by the current process. +func GetROLockFile(path string) (*LockFile, error) { + return getLockfile(path, true) } // GetROLockfile opens a read-only lock file, creating it if necessary. The // Locker object may already be locked if the path has already been requested // by the current process. +// +// Deprecated: Use GetROLockFile func GetROLockfile(path string) (Locker, error) { - return getLockfile(path, true) + return GetROLockFile(path) } -// getLockfile returns a Locker object, possibly (depending on the platform) +// getLockFile returns a *LockFile object, possibly (depending on the platform) // working inter-process, and associated with the specified path. // -// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the // “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation. // // WARNING: // - The lock may or MAY NOT be inter-process. // - There may or MAY NOT be an actual object on the filesystem created for the specified path. // - Even if ro, the lock MAY be exclusive. 
-func getLockfile(path string, ro bool) (Locker, error) { - lockfilesLock.Lock() - defer lockfilesLock.Unlock() - if lockfiles == nil { - lockfiles = make(map[string]Locker) +func getLockfile(path string, ro bool) (*LockFile, error) { + lockFilesLock.Lock() + defer lockFilesLock.Unlock() + if lockFiles == nil { + lockFiles = make(map[string]*LockFile) } cleanPath, err := filepath.Abs(path) if err != nil { return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err) } - if locker, ok := lockfiles[cleanPath]; ok { - if ro && locker.IsReadWrite() { + if lockFile, ok := lockFiles[cleanPath]; ok { + if ro && lockFile.IsReadWrite() { return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath) } - if !ro && !locker.IsReadWrite() { + if !ro && !lockFile.IsReadWrite() { return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath) } - return locker, nil + return lockFile, nil } - locker, err := createLockerForPath(cleanPath, ro) // platform-dependent locker + lockFile, err := createLockFileForPath(cleanPath, ro) // platform-dependent LockFile if err != nil { return nil, err } - lockfiles[cleanPath] = locker - return locker, nil + lockFiles[cleanPath] = lockFile + return lockFile, nil } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index b04c1ad0539..335980914b7 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -18,28 +18,48 @@ import ( "golang.org/x/sys/unix" ) -type lockfile struct { +// *LockFile represents a file lock where the file is used to cache an +// identifier of the last party that made changes to whatever's being protected +// by the lock. +// +// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead. +type LockFile struct { + // The following fields are only set when constructing *LockFile, and must never be modified afterwards. + // They are safe to access without any other locking. + file string + ro bool + // rwMutex serializes concurrent reader-writer acquisitions in the same process space rwMutex *sync.RWMutex // stateMutex is used to synchronize concurrent accesses to the state below stateMutex *sync.Mutex counter int64 - file string - fd uintptr - lw []byte // "last writer"-unique value valid as of the last .Touch() or .Modified(), generated by newLastWriterID() + lw LastWrite // A global value valid as of the last .Touch() or .Modified() locktype int16 locked bool - ro bool - recursive bool + // The following fields are only modified on transitions between counter == 0 / counter != 0. + // Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking. + // In other cases, they need to be protected using stateMutex. + fd uintptr +} + +// LastWrite is an opaque identifier of the last write to some *LockFile. +// It can be used by users of a *LockFile to determine if the lock indicates changes +// since the last check. +// +// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back. +type LastWrite struct { + // Never modify fields of a LastWrite object; it has value semantics. + state []byte // Contents of the lock file. } const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID) var lastWriterIDCounter uint64 // Private state for newLastWriterID -// newLastWriterID returns a new "last writer" ID. 
+// newLastWrite returns a new "last write" ID. // The value must be different on every call, and also differ from values // generated by other processes. -func newLastWriterID() []byte { +func newLastWrite() LastWrite { // The ID is (PID, time, per-process counter, random) // PID + time represents both a unique process across reboots, // and a specific time within the process; the per-process counter @@ -61,53 +81,84 @@ func newLastWriterID() []byte { panic(err) // This shouldn't happen } - return res + return LastWrite{ + state: res, + } +} + +// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize +func newLastWriteFromData(serialized []byte) LastWrite { + if serialized == nil { + panic("newLastWriteFromData with nil data") + } + return LastWrite{ + state: serialized, + } +} + +// serialize returns bytes to write to the lock file to represent the specified write. +func (lw LastWrite) serialize() []byte { + if lw.state == nil { + panic("LastWrite.serialize on an uninitialized object") + } + return lw.state +} + +// Equals returns true if lw matches other +func (lw LastWrite) equals(other LastWrite) bool { + if lw.state == nil { + panic("LastWrite.equals on an uninitialized object") + } + if other.state == nil { + panic("LastWrite.equals with an uninitialized counterparty") + } + return bytes.Equal(lw.state, other.state) } // openLock opens the file at path and returns the corresponding file -// descriptor. Note that the path is opened read-only when ro is set. If ro -// is unset, openLock will open the path read-write and create the file if -// necessary. +// descriptor. The path is opened either read-only or read-write, +// depending on the value of ro argument. +// +// openLock will create the file and its parent directories, +// if necessary. func openLock(path string, ro bool) (fd int, err error) { + flags := unix.O_CLOEXEC | os.O_CREATE if ro { - fd, err = unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC|os.O_CREATE, 0) + flags |= os.O_RDONLY } else { - fd, err = unix.Open(path, - os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE, - unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH, - ) + flags |= os.O_RDWR } - + fd, err = unix.Open(path, flags, 0o644) if err == nil { - return + return fd, nil } // the directory of the lockfile seems to be removed, try to create it if os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { - return fd, fmt.Errorf("creating locker directory: %w", err) + return fd, fmt.Errorf("creating lock file directory: %w", err) } return openLock(path, ro) } - return + return fd, &os.PathError{Op: "open", Path: path, Err: err} } -// createLockerForPath returns a Locker object, possibly (depending on the platform) +// createLockFileForPath returns new *LockFile object, possibly (depending on the platform) // working inter-process and associated with the specified path. // // This function will be called at most once for each path value within a single process. // -// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the // “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation. // // WARNING: // - The lock may or MAY NOT be inter-process. 
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
-func createLockerForPath(path string, ro bool) (Locker, error) {
+func createLockFileForPath(path string, ro bool) (*LockFile, error) {
 	// Check if we can open the lock.
 	fd, err := openLock(path, ro)
 	if err != nil {
@@ -119,22 +170,24 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
 	if ro {
 		locktype = unix.F_RDLCK
 	}
-	return &lockfile{
-		stateMutex: &sync.Mutex{},
+	return &LockFile{
+		file: path,
+		ro:   ro,
+
 		rwMutex:    &sync.RWMutex{},
-		file:       path,
-		lw:         newLastWriterID(),
+		stateMutex: &sync.Mutex{},
+		lw:         newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
 		locktype:   int16(locktype),
 		locked:     false,
-		ro:         ro}, nil
+	}, nil
 }
 
 // lock locks the lockfile via fcntl(2) based on the specified type and
 // command.
-func (l *lockfile) lock(lType int16, recursive bool) {
+func (l *LockFile) lock(lType int16) {
 	lk := unix.Flock_t{
 		Type:   lType,
-		Whence: int16(os.SEEK_SET),
+		Whence: int16(unix.SEEK_SET),
 		Start:  0,
 		Len:    0,
 	}
@@ -142,13 +195,7 @@ func (l *lockfile) lock(lType int16, recursive bool) {
 	case unix.F_RDLCK:
 		l.rwMutex.RLock()
 	case unix.F_WRLCK:
-		if recursive {
-			// NOTE: that's okay as recursive is only set in RecursiveLock(), so
-			// there's no need to protect against hypothetical RDLCK cases.
-			l.rwMutex.RLock()
-		} else {
-			l.rwMutex.Lock()
-		}
+		l.rwMutex.Lock()
 	default:
 		panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType))
 	}
@@ -158,7 +205,7 @@ func (l *lockfile) lock(lType int16, recursive bool) {
 		// If we're the first reference on the lock, we need to open the file again.
 		fd, err := openLock(l.file, l.ro)
 		if err != nil {
-			panic(fmt.Sprintf("error opening %q: %v", l.file, err))
+			panic(err)
 		}
 		l.fd = uintptr(fd)
@@ -171,39 +218,27 @@ func (l *lockfile) lock(lType int16, recursive bool) {
 	}
 	l.locktype = lType
 	l.locked = true
-	l.recursive = recursive
 	l.counter++
 }
 
 // Lock locks the lockfile as a writer. Panic if the lock is a read-only one.
-func (l *lockfile) Lock() {
+func (l *LockFile) Lock() {
 	if l.ro {
 		panic("can't take write lock on read-only lock file")
 	} else {
-		l.lock(unix.F_WRLCK, false)
-	}
-}
-
-// RecursiveLock locks the lockfile as a writer but allows for recursive
-// acquisitions within the same process space. Note that RLock() will be called
-// if it's a lockTypReader lock.
-func (l *lockfile) RecursiveLock() {
-	if l.ro {
-		l.RLock()
-	} else {
-		l.lock(unix.F_WRLCK, true)
+		l.lock(unix.F_WRLCK)
 	}
 }
 
// RLock locks the lockfile as a reader.
-func (l *lockfile) RLock() {
-	l.lock(unix.F_RDLCK, false)
+func (l *LockFile) RLock() {
+	l.lock(unix.F_RDLCK)
 }
 
 // Unlock unlocks the lockfile.
-func (l *lockfile) Unlock() {
+func (l *LockFile) Unlock() {
 	l.stateMutex.Lock()
-	if l.locked == false {
+	if !l.locked {
 		// Panic when unlocking an unlocked lock. That's a violation
 		// of the lock semantics and will reveal such.
 		panic("calling Unlock on unlocked lock")
@@ -224,7 +259,7 @@ func (l *lockfile) Unlock() {
 		// file lock.
 		unix.Close(int(l.fd))
 	}
-	if l.locktype == unix.F_RDLCK || l.recursive {
+	if l.locktype == unix.F_RDLCK {
 		l.rwMutex.RUnlock()
 	} else {
 		l.rwMutex.Unlock()
@@ -232,59 +267,157 @@ func (l *lockfile) Unlock() {
 	l.stateMutex.Unlock()
 }
 
-// Locked checks if lockfile is locked for writing by a thread in this process.
-func (l *lockfile) Locked() bool { - l.stateMutex.Lock() - defer l.stateMutex.Unlock() - return l.locked && (l.locktype == unix.F_WRLCK) +func (l *LockFile) AssertLocked() { + // DO NOT provide a variant that returns the value of l.locked. + // + // If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and + // we can’t tell the difference. + // + // Hence, this “AssertLocked” method, which exists only for sanity checks. + + // Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true + // with no possible writers. + // If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data + // without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers. + if !l.locked { + panic("internal error: lock is not held by the expected owner") + } } -// Touch updates the lock file with the UID of the user. -func (l *lockfile) Touch() error { - l.stateMutex.Lock() - if !l.locked || (l.locktype != unix.F_WRLCK) { - panic("attempted to update last-writer in lockfile without the write lock") +func (l *LockFile) AssertLockedForWriting() { + // DO NOT provide a variant that returns the current lock state. + // + // The same caveats as for AssertLocked apply equally. + + l.AssertLocked() + // Like AssertLocked, don’t even bother with l.stateMutex. + if l.locktype != unix.F_WRLCK { + panic("internal error: lock is not held for writing") } - defer l.stateMutex.Unlock() - l.lw = newLastWriterID() - n, err := unix.Pwrite(int(l.fd), l.lw, 0) +} + +// GetLastWrite returns a LastWrite value corresponding to current state of the lock. +// This is typically called before (_not after_) loading the state when initializing a consumer +// of the data protected by the lock. +// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. +// +// The caller must hold the lock (for reading or writing). +func (l *LockFile) GetLastWrite() (LastWrite, error) { + l.AssertLocked() + contents := make([]byte, lastWriterIDSize) + n, err := unix.Pread(int(l.fd), contents, 0) + if err != nil { + return LastWrite{}, err + } + // It is important to handle the partial read case, because + // the initial size of the lock file is zero, which is a valid + // state (no writes yet) + contents = contents[:n] + return newLastWriteFromData(contents), nil +} + +// RecordWrite updates the lock with a new LastWrite value, and returns the new value. +// +// If this function fails, the LastWriter value of the lock is indeterminate; +// the caller should keep using the previously-recorded LastWrite value, +// and possibly detecting its own modification as an external one: +// +// lw, err := state.lock.RecordWrite() +// if err != nil { /* fail */ } +// state.lastWrite = lw +// +// The caller must hold the lock for writing. +func (l *LockFile) RecordWrite() (LastWrite, error) { + l.AssertLockedForWriting() + lw := newLastWrite() + lockContents := lw.serialize() + n, err := unix.Pwrite(int(l.fd), lockContents, 0) + if err != nil { + return LastWrite{}, err + } + if n != len(lockContents) { + return LastWrite{}, unix.ENOSPC + } + return lw, nil +} + +// ModifiedSince checks if the lock has been changed since a provided LastWrite value, +// and returns the one to record instead. +// +// If ModifiedSince reports no modification, the previous LastWrite value +// is still valid and can continue to be used. 
+//
+// If this function fails, the LastWriter value of the lock is indeterminate;
+// the caller should fail and keep using the previously-recorded LastWrite value,
+// so that it continues failing until the situation is resolved. Similarly,
+// it should only update the recorded LastWrite value after processing the update:
+//
+// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
+// if err != nil { /* fail */ }
+// state.lastWrite = lw2
+// if modified {
+// 	if err := reload(); err != nil { /* fail */ }
+// 	state.lastWrite = lw2
+// }
+//
+// The caller must hold the lock (for reading or writing).
+func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
+	l.AssertLocked()
+	currentLW, err := l.GetLastWrite()
+	if err != nil {
+		return LastWrite{}, false, err
+	}
+	modified := !previous.equals(currentLW)
+	return currentLW, modified, nil
+}
+
+// Touch updates the lock file to record that the current lock holder has modified the lock-protected data.
+//
+// Deprecated: Use *LockFile.RecordWrite.
+func (l *LockFile) Touch() error {
+	lw, err := l.RecordWrite()
 	if err != nil {
 		return err
 	}
-	if n != len(l.lw) {
-		return unix.ENOSPC
+	l.stateMutex.Lock()
+	if !l.locked || (l.locktype != unix.F_WRLCK) {
+		panic("attempted to update last-writer in lockfile without the write lock")
 	}
+	defer l.stateMutex.Unlock()
+	l.lw = lw
 	return nil
 }
 
 // Modified indicates if the lockfile has been updated since the last time it
 // was loaded.
-func (l *lockfile) Modified() (bool, error) {
+// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
+// Callers cannot, in general, rely on this, because that might have happened for some other
+// owner of the same *LockFile who created it previously.
+//
+// Deprecated: Use *LockFile.ModifiedSince.
+func (l *LockFile) Modified() (bool, error) {
 	l.stateMutex.Lock()
 	if !l.locked {
 		panic("attempted to check last-writer in lockfile without locking it first")
 	}
 	defer l.stateMutex.Unlock()
-	currentLW := make([]byte, len(l.lw))
-	n, err := unix.Pread(int(l.fd), currentLW, 0)
+	oldLW := l.lw
+	// Note that this is called with stateMutex held; that’s fine because ModifiedSince doesn’t need to lock it.
+	currentLW, modified, err := l.ModifiedSince(oldLW)
 	if err != nil {
 		return true, err
 	}
-	if n != len(l.lw) {
-		return true, nil
-	}
-	oldLW := l.lw
 	l.lw = currentLW
-	return !bytes.Equal(currentLW, oldLW), nil
+	return modified, nil
 }
 
 // IsReadWriteLock indicates if the lock file is a read-write lock.
-func (l *lockfile) IsReadWrite() bool { +func (l *LockFile) IsReadWrite() bool { return !l.ro } // TouchedSince indicates if the lock file has been touched since the specified time -func (l *lockfile) TouchedSince(when time.Time) bool { +func (l *LockFile) TouchedSince(when time.Time) bool { st, err := system.Fstat(int(l.fd)) if err != nil { return true diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go index 82bd91db9a9..09f2aca5ccd 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package lockfile @@ -8,65 +9,140 @@ import ( "time" ) -// createLockerForPath returns a Locker object, possibly (depending on the platform) +// createLockFileForPath returns a *LockFile object, possibly (depending on the platform) // working inter-process and associated with the specified path. // // This function will be called at most once for each path value within a single process. // -// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the // “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation. // // WARNING: // - The lock may or MAY NOT be inter-process. // - There may or MAY NOT be an actual object on the filesystem created for the specified path. // - Even if ro, the lock MAY be exclusive. -func createLockerForPath(path string, ro bool) (Locker, error) { - return &lockfile{locked: false}, nil +func createLockFileForPath(path string, ro bool) (*LockFile, error) { + return &LockFile{locked: false}, nil } -type lockfile struct { +// *LockFile represents a file lock where the file is used to cache an +// identifier of the last party that made changes to whatever's being protected +// by the lock. +// +// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead. +type LockFile struct { mu sync.Mutex file string locked bool } -func (l *lockfile) Lock() { - l.mu.Lock() - l.locked = true +// LastWrite is an opaque identifier of the last write to some *LockFile. +// It can be used by users of a *LockFile to determine if the lock indicates changes +// since the last check. +// A default-initialized LastWrite never matches any last write, i.e. it always indicates changes. +type LastWrite struct { + // Nothing: The Windows “implementation” does not actually track writes. } -func (l *lockfile) RecursiveLock() { - // We don't support Windows but a recursive writer-lock in one process-space - // is really a writer lock, so just panic. - panic("not supported") +func (l *LockFile) Lock() { + l.mu.Lock() + l.locked = true } -func (l *lockfile) RLock() { +func (l *LockFile) RLock() { l.mu.Lock() l.locked = true } -func (l *lockfile) Unlock() { +func (l *LockFile) Unlock() { l.locked = false l.mu.Unlock() } -func (l *lockfile) Locked() bool { - return l.locked +func (l *LockFile) AssertLocked() { + // DO NOT provide a variant that returns the value of l.locked. 
+ // + // If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and + // we can’t tell the difference. + // + // Hence, this “AssertLocked” method, which exists only for sanity checks. + if !l.locked { + panic("internal error: lock is not held by the expected owner") + } +} + +func (l *LockFile) AssertLockedForWriting() { + // DO NOT provide a variant that returns the current lock state. + // + // The same caveats as for AssertLocked apply equally. + l.AssertLocked() // The current implementation does not distinguish between read and write locks. +} + +// GetLastWrite() returns a LastWrite value corresponding to current state of the lock. +// This is typically called before (_not after_) loading the state when initializing a consumer +// of the data protected by the lock. +// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. +// +// The caller must hold the lock (for reading or writing) before this function is called. +func (l *LockFile) GetLastWrite() (LastWrite, error) { + l.AssertLocked() + return LastWrite{}, nil } -func (l *lockfile) Modified() (bool, error) { +// RecordWrite updates the lock with a new LastWrite value, and returns the new value. +// +// If this function fails, the LastWriter value of the lock is indeterminate; +// the caller should keep using the previously-recorded LastWrite value, +// and possibly detecting its own modification as an external one: +// +// lw, err := state.lock.RecordWrite() +// if err != nil { /* fail */ } +// state.lastWrite = lw +// +// The caller must hold the lock for writing. +func (l *LockFile) RecordWrite() (LastWrite, error) { + return LastWrite{}, nil +} + +// ModifiedSince checks if the lock has been changed since a provided LastWrite value, +// and returns the one to record instead. +// +// If ModifiedSince reports no modification, the previous LastWrite value +// is still valid and can continue to be used. +// +// If this function fails, the LastWriter value of the lock is indeterminate; +// the caller should fail and keep using the previously-recorded LastWrite value, +// so that it continues failing until the situation is resolved. Similarly, +// it should only update the recorded LastWrite value after processing the update: +// +// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite) +// if err != nil { /* fail */ } +// state.lastWrite = lw2 +// if modified { +// if err := reload(); err != nil { /* fail */ } +// state.lastWrite = lw2 +// } +// +// The caller must hold the lock (for reading or writing). +func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) { + return LastWrite{}, false, nil +} + +// Deprecated: Use *LockFile.ModifiedSince. +func (l *LockFile) Modified() (bool, error) { return false, nil } -func (l *lockfile) Touch() error { + +// Deprecated: Use *LockFile.RecordWrite. 
+func (l *LockFile) Touch() error { return nil } -func (l *lockfile) IsReadWrite() bool { +func (l *LockFile) IsReadWrite() bool { return false } -func (l *lockfile) TouchedSince(when time.Time) bool { +func (l *LockFile) TouchedSince(when time.Time) bool { stat, err := os.Stat(l.file) if err != nil { return true diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index 6f072650537..de10e33244c 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package loopback diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go index ea6841958dd..da2ba46fe86 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go +++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package loopback diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go index a50de7f07a2..21a981007b3 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package loopback diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go index c9be05776d1..ec42872476a 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package loopback diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go index 1d1afeee2eb..a2a1d407231 100644 --- a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go @@ -1,16 +1,29 @@ +//go:build !windows // +build !windows package mount -import "golang.org/x/sys/unix" +import ( + "time" + + "golang.org/x/sys/unix" +) func unmount(target string, flags int) error { - err := unix.Unmount(target, flags) - if err == nil || err == unix.EINVAL { - // Ignore "not mounted" error here. Note the same error - // can be returned if flags are invalid, so this code - // assumes that the flags value is always correct. - return nil + var err error + for i := 0; i < 50; i++ { + err = unix.Unmount(target, flags) + switch err { + case unix.EBUSY: + time.Sleep(50 * time.Millisecond) + continue + case unix.EINVAL, nil: + // Ignore "not mounted" error here. Note the same error + // can be returned if flags are invalid, so this code + // assumes that the flags value is always correct. 
+ return nil + } + break } return &mountError{ diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go index eebc4ab84e2..d3a0cf51ce3 100644 --- a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package mount diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go index 7738fc7411e..6406cb14ff2 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // Package kernel provides helper function to get, parse and compare kernel diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go index 20d67f78071..645790da648 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go @@ -43,7 +43,7 @@ func getRelease() (string, error) { prettyNames, err := shellwords.Parse(content[1]) if err != nil { - return "", fmt.Errorf("kernel version is invalid: %s", err.Error()) + return "", fmt.Errorf("kernel version is invalid: %w", err) } if len(prettyNames) != 2 { diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go index 7a68bc39bf4..ed8cca2c628 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go @@ -1,3 +1,4 @@ +//go:build linux || freebsd || solaris || openbsd // +build linux freebsd solaris openbsd // Package kernel provides helper function to get, parse and compare kernel diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go index 3d382923686..b30da9fadb1 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package kernel diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go new file mode 100644 index 00000000000..e913fad0013 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go @@ -0,0 +1,17 @@ +package kernel + +import "golang.org/x/sys/unix" + +// Utsname represents the system name structure. +// It is passthrough for unix.Utsname in order to make it portable with +// other platforms where it is not available. 
+type Utsname unix.Utsname + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go index 1da3f239fac..12671db5139 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go @@ -1,18 +1,14 @@ -// +build !linux,!solaris +//go:build openbsd +// +build openbsd package kernel import ( - "errors" + "fmt" + "runtime" ) -// Utsname represents the system name structure. -// It is defined here to make it portable as it is available on linux but not -// on windows. -type Utsname struct { - Release [65]byte -} - +// A stub called by kernel_unix.go . func uname() (*Utsname, error) { - return nil, errors.New("Kernel version detection is available only on linux") + return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS) } diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go new file mode 100644 index 00000000000..f515500c929 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go @@ -0,0 +1,11 @@ +//go:build !linux && !solaris && !freebsd +// +build !linux,!solaris,!freebsd + +package kernel + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go index 6f63ae99170..32d6a9f49a1 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package reexec diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go index d3dd86d349f..87b43ed9505 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package reexec diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go index 5b3605f319c..77c93b4ab5b 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows && !freebsd && !solaris && !darwin // +build !linux,!windows,!freebsd,!solaris,!darwin package reexec diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go index d868564767f..c46125ebf57 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package reexec diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp.go 
b/vendor/github.com/containers/storage/pkg/regexp/regexp.go
new file mode 100644
index 00000000000..ec879710634
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/regexp/regexp.go
@@ -0,0 +1,214 @@
+package regexp
+
+import (
+	"io"
+	"regexp"
+	"sync"
+)
+
+// Regexp is a wrapper struct used for wrapping MustCompile regex expressions
+// used as global variables. Using this structure helps speed the startup time
+// of apps that want to use global regex variables. This library initializes them on
+// first use as opposed to the start of the executable.
+type Regexp struct {
+	once   sync.Once
+	regexp *regexp.Regexp
+	val    string
+}
+
+func Delayed(val string) Regexp {
+	re := Regexp{
+		val: val,
+	}
+	if precompile {
+		re.regexp = regexp.MustCompile(re.val)
+	}
+	return re
+}
+
+func (re *Regexp) compile() {
+	if precompile {
+		return
+	}
+	re.once.Do(func() {
+		re.regexp = regexp.MustCompile(re.val)
+	})
+}
+
+func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
+	re.compile()
+	return re.regexp.Expand(dst, template, src, match)
+}
+
+func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
+	re.compile()
+	return re.regexp.ExpandString(dst, template, src, match)
+}
+func (re *Regexp) Find(b []byte) []byte {
+	re.compile()
+	return re.regexp.Find(b)
+}
+
+func (re *Regexp) FindAll(b []byte, n int) [][]byte {
+	re.compile()
+	return re.regexp.FindAll(b, n)
+}
+
+func (re *Regexp) FindAllIndex(b []byte, n int) [][]int {
+	re.compile()
+	return re.regexp.FindAllIndex(b, n)
+}
+
+func (re *Regexp) FindAllString(s string, n int) []string {
+	re.compile()
+	return re.regexp.FindAllString(s, n)
+}
+
+func (re *Regexp) FindAllStringIndex(s string, n int) [][]int {
+	re.compile()
+	return re.regexp.FindAllStringIndex(s, n)
+}
+
+func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
+	re.compile()
+	return re.regexp.FindAllStringSubmatch(s, n)
+}
+
+func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int {
+	re.compile()
+	return re.regexp.FindAllStringSubmatchIndex(s, n)
+}
+
+func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte {
+	re.compile()
+	return re.regexp.FindAllSubmatch(b, n)
+}
+
+func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int {
+	re.compile()
+	return re.regexp.FindAllSubmatchIndex(b, n)
+}
+
+func (re *Regexp) FindIndex(b []byte) (loc []int) {
+	re.compile()
+	return re.regexp.FindIndex(b)
+}
+
+func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) {
+	re.compile()
+	return re.regexp.FindReaderIndex(r)
+}
+
+func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
+	re.compile()
+	return re.regexp.FindReaderSubmatchIndex(r)
+}
+
+func (re *Regexp) FindString(s string) string {
+	re.compile()
+	return re.regexp.FindString(s)
+}
+
+func (re *Regexp) FindStringIndex(s string) (loc []int) {
+	re.compile()
+	return re.regexp.FindStringIndex(s)
+}
+
+func (re *Regexp) FindStringSubmatch(s string) []string {
+	re.compile()
+	return re.regexp.FindStringSubmatch(s)
+}
+
+func (re *Regexp) FindStringSubmatchIndex(s string) []int {
+	re.compile()
+	return re.regexp.FindStringSubmatchIndex(s)
+}
+
+func (re *Regexp) FindSubmatch(b []byte) [][]byte {
+	re.compile()
+	return re.regexp.FindSubmatch(b)
+}
+
+func (re *Regexp) FindSubmatchIndex(b []byte) []int {
+	re.compile()
+	return re.regexp.FindSubmatchIndex(b)
+}
+
+func (re *Regexp) LiteralPrefix() (prefix string, complete bool) {
+	re.compile()
+	return re.regexp.LiteralPrefix()
+}
+
+func 
(re *Regexp) Longest() { + re.compile() + re.regexp.Longest() +} + +func (re *Regexp) Match(b []byte) bool { + re.compile() + return re.regexp.Match(b) +} + +func (re *Regexp) MatchReader(r io.RuneReader) bool { + re.compile() + return re.regexp.MatchReader(r) +} +func (re *Regexp) MatchString(s string) bool { + re.compile() + return re.regexp.MatchString(s) +} + +func (re *Regexp) NumSubexp() int { + re.compile() + return re.regexp.NumSubexp() +} + +func (re *Regexp) ReplaceAll(src, repl []byte) []byte { + re.compile() + return re.regexp.ReplaceAll(src, repl) +} + +func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { + re.compile() + return re.regexp.ReplaceAllFunc(src, repl) +} + +func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { + re.compile() + return re.regexp.ReplaceAllLiteral(src, repl) +} + +func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { + re.compile() + return re.regexp.ReplaceAllLiteralString(src, repl) +} + +func (re *Regexp) ReplaceAllString(src, repl string) string { + re.compile() + return re.regexp.ReplaceAllString(src, repl) +} + +func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { + re.compile() + return re.regexp.ReplaceAllStringFunc(src, repl) +} + +func (re *Regexp) Split(s string, n int) []string { + re.compile() + return re.regexp.Split(s, n) +} + +func (re *Regexp) String() string { + re.compile() + return re.regexp.String() +} + +func (re *Regexp) SubexpIndex(name string) int { + re.compile() + return re.regexp.SubexpIndex(name) +} + +func (re *Regexp) SubexpNames() []string { + re.compile() + return re.regexp.SubexpNames() +} diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go new file mode 100644 index 00000000000..834dd943377 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go @@ -0,0 +1,6 @@ +//go:build !regexp_precompile +// +build !regexp_precompile + +package regexp + +const precompile = false diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go new file mode 100644 index 00000000000..a5fe0dbc49a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go @@ -0,0 +1,6 @@ +//go:build regexp_precompile +// +build regexp_precompile + +package regexp + +const precompile = true diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go index 4c434f0e583..3ae44fd8ae9 100644 --- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go +++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go @@ -9,18 +9,19 @@ import ( "math" "math/big" "math/rand" - "regexp" "strconv" "strings" "sync" "time" + + "github.com/containers/storage/pkg/regexp" ) const shortLen = 12 var ( - validShortID = regexp.MustCompile("^[a-f0-9]{12}$") - validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) + validShortID = regexp.Delayed("^[a-f0-9]{12}$") + validHex = regexp.Delayed(`^[a-f0-9]{64}$`) rngLock sync.Mutex rng *rand.Rand // A RNG with seeding properties we control. It can only be accessed with randLock held. 
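The stringid hunk above is the migration pattern this new package enables: package-level regexp.MustCompile calls become regexp.Delayed values, so compilation happens on first use instead of at process startup. A minimal standalone sketch of the same pattern (the variable name and test strings below are illustrative, not part of the change):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/regexp"
)

// Without the regexp_precompile build tag, Delayed only records the pattern;
// the first method call compiles it once via sync.Once.
var validShort = regexp.Delayed(`^[a-f0-9]{12}$`)

func main() {
	fmt.Println(validShort.MatchString("0123456789ab")) // true; compiles on this first call
	fmt.Println(validShort.MatchString("not-a-hex-id")) // false; reuses the compiled pattern
}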
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
index 09d58bcbfdd..a208a6b5ba8 100644
--- a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package system
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
index 45428c141ca..bfb621dcc82 100644
--- a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package system
diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go
index 17935088ded..05642f60380 100644
--- a/vendor/github.com/containers/storage/pkg/system/init.go
+++ b/vendor/github.com/containers/storage/pkg/system/init.go
@@ -6,7 +6,7 @@ import (
 	"unsafe"
 )
 
-// Used by chtimes
+// maxTime is used by chtimes.
 var maxTime time.Time
 
 func init() {
diff --git a/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go
new file mode 100644
index 00000000000..4eaeb5d69f1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go
@@ -0,0 +1,56 @@
+//go:build freebsd
+// +build freebsd
+
+package system
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Flag values from <sys/stat.h>
+const (
+	/*
+	 * Definitions of flags stored in file flags word.
+	 *
+	 * Super-user and owner changeable flags.
+	 */
+	UF_SETTABLE  uint32 = 0x0000ffff /* mask of owner changeable flags */
+	UF_NODUMP    uint32 = 0x00000001 /* do not dump file */
+	UF_IMMUTABLE uint32 = 0x00000002 /* file may not be changed */
+	UF_APPEND    uint32 = 0x00000004 /* writes to file may only append */
+	UF_OPAQUE    uint32 = 0x00000008 /* directory is opaque wrt. union */
+	UF_NOUNLINK  uint32 = 0x00000010 /* file may not be removed or renamed */
+
+	UF_SYSTEM   uint32 = 0x00000080 /* Windows system file bit */
+	UF_SPARSE   uint32 = 0x00000100 /* sparse file */
+	UF_OFFLINE  uint32 = 0x00000200 /* file is offline */
+	UF_REPARSE  uint32 = 0x00000400 /* Windows reparse point file bit */
+	UF_ARCHIVE  uint32 = 0x00000800 /* file needs to be archived */
+	UF_READONLY uint32 = 0x00001000 /* Windows readonly file bit */
+	/* This is the same as the MacOS X definition of UF_HIDDEN. */
+	UF_HIDDEN uint32 = 0x00008000 /* file is hidden */
+
+	/*
+	 * Super-user changeable flags.
+ */ + SF_SETTABLE uint32 = 0xffff0000 /* mask of superuser changeable flags */ + SF_ARCHIVED uint32 = 0x00010000 /* file is archived */ + SF_IMMUTABLE uint32 = 0x00020000 /* file may not be changed */ + SF_APPEND uint32 = 0x00040000 /* writes to file may only append */ + SF_NOUNLINK uint32 = 0x00100000 /* file may not be removed or renamed */ + SF_SNAPSHOT uint32 = 0x00200000 /* snapshot inode */ +) + +func Lchflags(path string, flags uint32) error { + p, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + _, _, e1 := unix.Syscall(unix.SYS_LCHFLAGS, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + return e1 + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go index cff33bb4085..42658c8b9a2 100644 --- a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go index e9d301f090c..9b13e61468d 100644 --- a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system @@ -14,7 +15,7 @@ import ( func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { - return nil, &os.PathError{"Lstat", path, err} + return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} } return fromStatT(s) } diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go index d66f1c5a41a..46cb40291f6 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go @@ -4,6 +4,7 @@ package system import ( + "errors" "fmt" "unsafe" @@ -70,7 +71,7 @@ func ReadMemInfo() (*MemInfo, error) { } if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 { - return nil, fmt.Errorf("getting system memory info %w", err) + return nil, errors.New("getting system memory info") } meminfo := &MemInfo{} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go index c276ce8e80d..d3d0ed8a128 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -1,3 +1,4 @@ +//go:build !windows && !freebsd // +build !windows,!freebsd package system @@ -8,8 +9,8 @@ import ( // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev. 
-func Mknod(path string, mode uint32, dev int) error { - return unix.Mknod(path, mode, dev) +func Mknod(path string, mode uint32, dev uint32) error { + return unix.Mknod(path, mode, int(dev)) } // Mkdev is used to build the value of linux devices (in /dev/) which specifies major diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go index d09005589af..53c3f2837e2 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package system @@ -17,6 +18,6 @@ func Mknod(path string, mode uint32, dev uint64) error { // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +func Mkdev(major int64, minor int64) uint64 { + return uint64(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) } diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go index 2e863c0215b..c35b1b346ad 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go index f3762e69d36..ff01143eef2 100644 --- a/vendor/github.com/containers/storage/pkg/system/path_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go index a9a0dd7517e..7ee59d9262a 100644 --- a/vendor/github.com/containers/storage/pkg/system/process_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go @@ -1,3 +1,4 @@ +//go:build linux || freebsd || solaris || darwin // +build linux freebsd solaris darwin package system diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go index b65121f1d4d..60c7d8bd9b4 100644 --- a/vendor/github.com/containers/storage/pkg/system/rm.go +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -1,6 +1,7 @@ package system import ( + "errors" "fmt" "os" "syscall" @@ -40,6 +41,19 @@ func EnsureRemoveAll(dir string) error { return nil } + // If the RemoveAll fails with a permission error, we + // may have immutable files so try to remove the + // immutable flag and redo the RemoveAll. 
+ if errors.Is(err, syscall.EPERM) { + if err = resetFileFlags(dir); err != nil { + return fmt.Errorf("resetting file flags: %w", err) + } + err = os.RemoveAll(dir) + if err == nil { + return nil + } + } + pe, ok := err.(*os.PathError) if !ok { return err @@ -62,7 +76,7 @@ func EnsureRemoveAll(dir string) error { continue } - if pe.Err != syscall.EBUSY { + if !IsEBUSY(pe.Err) { return err } diff --git a/vendor/github.com/containers/storage/pkg/system/rm_common.go b/vendor/github.com/containers/storage/pkg/system/rm_common.go new file mode 100644 index 00000000000..117eb1d6dc5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm_common.go @@ -0,0 +1,10 @@ +//go:build !freebsd +// +build !freebsd + +package system + +// Reset file flags in a directory tree. This allows EnsureRemoveAll +// to delete trees which have the immutable flag set. +func resetFileFlags(dir string) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go new file mode 100644 index 00000000000..39a5de7aa4f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go @@ -0,0 +1,17 @@ +package system + +import ( + "io/fs" + "path/filepath" +) + +// Reset file flags in a directory tree. This allows EnsureRemoveAll +// to delete trees which have the immutable flag set. +func resetFileFlags(dir string) error { + return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err := Lchflags(path, 0); err != nil { + return err + } + return nil + }) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_common.go b/vendor/github.com/containers/storage/pkg/system/stat_common.go new file mode 100644 index 00000000000..e965c54c28c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_common.go @@ -0,0 +1,13 @@ +//go:build !freebsd +// +build !freebsd + +package system + +type platformStatT struct { +} + +// Flags return file flags if supported or zero otherwise +func (s StatT) Flags() uint32 { + _ = s.platformStatT // Silence warnings that StatT.platformStatT is unused (on these platforms) + return 0 +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go index 715f05b9387..9c510468f68 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -2,12 +2,25 @@ package system import "syscall" +type platformStatT struct { + flags uint32 +} + +// Flags return file flags if supported or zero otherwise +func (s StatT) Flags() uint32 { + return s.flags +} + // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + st := &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + dev: s.Dev} + st.flags = s.Flags + st.dev = s.Dev + return st, nil } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go index af7af20fa4d..e5dcba822d8 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -9,7 +9,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil + mtim: 
s.Mtim, + dev: uint64(s.Dev)}, nil } // FromStatT converts a syscall.Stat_t type to a system.Stat_t type diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go index 2fac918bfc3..47ae899f8b5 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system @@ -17,6 +18,8 @@ type StatT struct { rdev uint64 size int64 mtim syscall.Timespec + dev uint64 + platformStatT } // Mode returns file's permission mode. @@ -49,6 +52,11 @@ func (s StatT) Mtim() syscall.Timespec { return s.mtim } +// Dev returns a unique identifier for owning filesystem +func (s StatT) Dev() uint64 { + return s.dev +} + // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go index d306360520b..81edaadbb43 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -11,6 +11,7 @@ type StatT struct { mode os.FileMode size int64 mtim time.Time + platformStatT } // Size returns file's size. @@ -42,6 +43,11 @@ func (s StatT) GID() uint32 { return 0 } +// Dev returns a unique identifier for owning filesystem +func (s StatT) Dev() uint64 { + return 0 +} + // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go index 5a10eda5afb..ad0337db771 100644 --- a/vendor/github.com/containers/storage/pkg/system/umask.go +++ b/vendor/github.com/containers/storage/pkg/system/umask.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go index 13f1de1769c..9497596a011 100644 --- a/vendor/github.com/containers/storage/pkg/system/umask_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go index 6a77524376d..edc588a63f3 100644 --- a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go @@ -10,13 +10,14 @@ import ( // LUtimesNano is used to change access and modification time of the specified path. // It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. 
func LUtimesNano(path string, ts []syscall.Timespec) error { + atFdCwd := unix.AT_FDCWD + var _path *byte _path, err := unix.BytePtrFromString(path) if err != nil { return err } - - if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { return err } diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go index 139714544d0..843ecdc53ad 100644 --- a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !freebsd // +build !linux,!freebsd package system diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go index 221eb78bc22..8bd7acf1fbd 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !darwin // +build !linux,!darwin package system diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go index 74776e65e6f..8f073265194 100644 --- a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go +++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go @@ -14,7 +14,7 @@ import ( var ( // ErrEmptyPrefix is an error returned if the prefix was empty. - ErrEmptyPrefix = errors.New("Prefix can't be empty") + ErrEmptyPrefix = errors.New("prefix can't be empty") // ErrIllegalChar is returned when a space is in the ID ErrIllegalChar = errors.New("illegal character: ' '") @@ -25,7 +25,7 @@ var ( // ErrAmbiguousPrefix is returned if the prefix was ambiguous // (multiple ids for the prefix). -type ErrAmbiguousPrefix struct { +type ErrAmbiguousPrefix struct { //nolint: errname prefix string } @@ -42,6 +42,7 @@ type TruncIndex struct { } // NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. +// Invalid IDs are _silently_ ignored. func NewTruncIndex(ids []string) (idx *TruncIndex) { idx = &TruncIndex{ ids: make(map[string]struct{}), @@ -51,7 +52,7 @@ func NewTruncIndex(ids []string) (idx *TruncIndex) { trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), } for _, id := range ids { - idx.addID(id) + _ = idx.addID(id) // Ignore invalid IDs. Duplicate IDs are not a problem. } return } @@ -132,7 +133,8 @@ func (idx *TruncIndex) Get(s string) (string, error) { func (idx *TruncIndex) Iterate(handler func(id string)) { idx.Lock() defer idx.Unlock() - idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + // Ignore the error from Visit: it can only fail if the provided visitor fails, and ours never does. 
+ _ = idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { handler(string(prefix)) return nil }) diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go index 4f441c32c59..08dbc661da7 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go index a5005403afa..25054810aa1 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go @@ -1,3 +1,4 @@ +//go:build linux && !cgo // +build linux,!cgo package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go index c854fdf5e47..00f397f3504 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go @@ -5,18 +5,12 @@ import ( "os" "os/user" "sync" - - "github.com/sirupsen/logrus" ) var ( homeDirOnce sync.Once homeDirErr error homeDir string - - hasCapSysAdminOnce sync.Once - hasCapSysAdminRet bool - hasCapSysAdminErr error ) // HomeDir returns the home directory for the current user. @@ -36,14 +30,3 @@ func HomeDir() (string, error) { }) return homeDir, homeDirErr } - -func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname - if err != nil { - if format != "" { - logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) - } else { - logrus.Errorf("%v", err) - } - os.Exit(1) - } -} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go index 6a6f21d9c0f..fbfb90d599a 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go @@ -5,7 +5,7 @@ package unshare // #cgo CFLAGS: -Wall // extern void _containers_unshare(void); -// void __attribute__((constructor)) init(void) { +// static void __attribute__((constructor)) init(void) { // _containers_unshare(); // } import "C" diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go index 01cf33bde73..86ac12ecab2 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go @@ -1,3 +1,4 @@ +//go:build darwin // +build darwin package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go index 2f95da7d8e9..21a43d38cb7 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go @@ -1,10 +1,11 @@ +//go:build linux && cgo && gccgo // +build linux,cgo,gccgo package unshare // #cgo CFLAGS: -Wall -Wextra // extern void _containers_unshare(void); -// void __attribute__((constructor)) init(void) { +// static void __attribute__((constructor)) init(void) { // _containers_unshare(); // } import "C" diff --git 
a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index c86390bd386..86922846e25 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -387,10 +387,47 @@ const ( UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" ) +// hasFullUsersMappings checks whether the current user namespace has all the IDs mapped. +func hasFullUsersMappings() (bool, error) { + content, err := os.ReadFile("/proc/self/uid_map") + if err != nil { + return false, err + } + // The kernel rejects attempts to create mappings where either starting + // point is (u32)-1: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c?id=af3e9579ecfb#n1006 . + // So, if the uid_map contains 4294967295, the entire IDs space is available in the + // user namespace, so it is likely the initial user namespace. + return bytes.Contains(content, []byte("4294967295")), nil +} + +var ( + hasCapSysAdminOnce sync.Once + hasCapSysAdminRet bool + hasCapSysAdminErr error +) + // IsRootless tells us if we are running in rootless mode func IsRootless() bool { isRootlessOnce.Do(func() { isRootless = getRootlessUID() != 0 || getenv(UsernsEnvName) != "" + if !isRootless { + hasCapSysAdmin, err := HasCapSysAdmin() + if err != nil { + logrus.Warnf("Failed to read CAP_SYS_ADMIN presence for the current process") + } + if err == nil && !hasCapSysAdmin { + isRootless = true + } + } + if !isRootless { + hasMappings, err := hasFullUsersMappings() + if err != nil { + logrus.Warnf("Failed to read current user namespace mappings") + } + if err == nil && !hasMappings { + isRootless = true + } + } }) return isRootless } @@ -414,10 +451,21 @@ type Runnable interface { Run() error } +func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname + if err != nil { + if format != "" { + logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) + } else { + logrus.Errorf("%v", err) + } + os.Exit(1) + } +} + // MaybeReexecUsingUserNamespace re-exec the process in a new namespace func MaybeReexecUsingUserNamespace(evenForRoot bool) { // If we've already been through this once, no need to try again. - if os.Geteuid() == 0 && IsRootless() { + if os.Geteuid() == 0 && GetRootlessUID() > 0 { return } diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index e075bce13e3..6c419a952f8 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -1,4 +1,4 @@ -# This file is is the configuration file for all tools +# This file is the configuration file for all tools # that use the containers/storage library. The storage.conf file # overrides all other storage.conf files. Container engines using the # container/storage library do not inherit fields from other storage.conf @@ -32,6 +32,10 @@ graphroot = "/var/lib/containers/storage" # # rootless_storage_path = "$HOME/.local/share/containers/storage" +# Transient store mode makes all container metadata be saved in temporary storage +# (i.e. runroot above). This is faster, but doesn't persist across reboots. +# transient_store = true + [storage.options] # Storage options to be passed to underlying storage drivers @@ -150,7 +154,7 @@ mountopt = "nodev" # future. 
When "force_mask" is set the original permission mask is stored in # the "user.containers.override_stat" xattr and the "mount_program" option must # be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the -# extended attribute permissions to processes within containers rather then the +# extended attribute permissions to processes within containers rather than the # "force_mask" permissions. # # force_mask = "" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 8297d3c2c76..d208e0bfafb 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "reflect" @@ -21,6 +20,7 @@ import ( "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/stringutils" "github.com/containers/storage/pkg/system" @@ -28,6 +28,7 @@ import ( "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" ) type updateNameOperation int @@ -38,62 +39,39 @@ const ( removeNames ) +const ( + volatileFlag = "Volatile" + mountLabelFlag = "MountLabel" + processLabelFlag = "ProcessLabel" + mountOptsFlag = "MountOpts" +) + var ( stores []*store storesLock sync.Mutex ) -// ROFileBasedStore wraps up the methods of the various types of file-based -// data stores that we implement which are needed for both read-only and -// read-write files. -type ROFileBasedStore interface { - Locker - - // Load reloads the contents of the store from disk. It should be called - // with the lock held. - Load() error - - // ReloadIfChanged reloads the contents of the store from disk if it is changed. - ReloadIfChanged() error -} - -// RWFileBasedStore wraps up the methods of various types of file-based data -// stores that we implement using read-write files. -type RWFileBasedStore interface { - // Save saves the contents of the store to disk. It should be called with - // the lock held, and Touch() should be called afterward before releasing the - // lock. - Save() error -} - -// FileBasedStore wraps up the common methods of various types of file-based -// data stores that we implement. -type FileBasedStore interface { - ROFileBasedStore - RWFileBasedStore -} - -// ROMetadataStore wraps a method for reading metadata associated with an ID. -type ROMetadataStore interface { +// roMetadataStore wraps a method for reading metadata associated with an ID. +type roMetadataStore interface { // Metadata reads metadata associated with an item with the specified ID. Metadata(id string) (string, error) } -// RWMetadataStore wraps a method for setting metadata associated with an ID. -type RWMetadataStore interface { +// rwMetadataStore wraps a method for setting metadata associated with an ID. +type rwMetadataStore interface { // SetMetadata updates the metadata associated with the item with the specified ID. SetMetadata(id, metadata string) error } -// MetadataStore wraps up methods for getting and setting metadata associated with IDs. -type MetadataStore interface { - ROMetadataStore - RWMetadataStore +// metadataStore wraps up methods for getting and setting metadata associated with IDs. 
+type metadataStore interface {
+	roMetadataStore
+	rwMetadataStore
 }
 
-// An ROBigDataStore wraps up the read-only big-data related methods of the
+// An roBigDataStore wraps up the read-only big-data related methods of the
 // various types of file-based lookaside stores that we implement.
-type ROBigDataStore interface {
+type roBigDataStore interface {
 	// BigData retrieves a (potentially large) piece of data associated with
 	// this ID, if it has previously been set.
 	BigData(id, key string) ([]byte, error)
@@ -111,8 +89,8 @@ type ROBigDataStore interface {
 	BigDataNames(id string) ([]string, error)
 }
 
-// A RWImageBigDataStore wraps up how we store big-data associated with images.
-type RWImageBigDataStore interface {
+// A rwImageBigDataStore wraps up how we store big-data associated with images.
+type rwImageBigDataStore interface {
 	// SetBigData stores a (potentially large) piece of data associated
 	// with this ID.
 	// Pass github.com/containers/image/manifest.Digest as digestManifest
@@ -120,16 +98,16 @@ type RWImageBigDataStore interface {
 	SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
 }
 
-// A ContainerBigDataStore wraps up how we store big-data associated with containers.
-type ContainerBigDataStore interface {
-	ROBigDataStore
+// A containerBigDataStore wraps up how we store big-data associated with containers.
+type containerBigDataStore interface {
+	roBigDataStore
 
 	// SetBigData stores a (potentially large) piece of data associated
 	// with this ID.
 	SetBigData(id, key string, data []byte) error
 }
 
-// A ROLayerBigDataStore wraps up how we store RO big-data associated with layers.
-type ROLayerBigDataStore interface {
+// A roLayerBigDataStore wraps up how we store RO big-data associated with layers.
+type roLayerBigDataStore interface {
 	// BigData retrieves a (potentially large) piece of data associated
 	// with this ID.
 	BigData(id, key string) (io.ReadCloser, error)
@@ -139,21 +117,15 @@ type ROLayerBigDataStore interface {
 	BigDataNames(id string) ([]string, error)
 }
 
-// A RWLayerBigDataStore wraps up how we store big-data associated with layers.
-type RWLayerBigDataStore interface {
+// A rwLayerBigDataStore wraps up how we store big-data associated with layers.
+type rwLayerBigDataStore interface {
 	// SetBigData stores a (potentially large) piece of data associated
 	// with this ID.
 	SetBigData(id, key string, data io.Reader) error
 }
 
-// A LayerBigDataStore wraps up how we store big-data associated with layers.
-type LayerBigDataStore interface {
-	ROLayerBigDataStore
-	RWLayerBigDataStore
-}
-
-// A FlaggableStore can have flags set and cleared on items which it manages.
-type FlaggableStore interface {
+// A flaggableStore can have flags set and cleared on items which it manages.
+type flaggableStore interface {
 	// ClearFlag removes a named flag from an item in the store.
 	ClearFlag(id string, flag string) error
 
@@ -170,6 +142,7 @@ type Store interface {
 	// settings that were passed to GetStore() when the object was created.
 	RunRoot() string
 	GraphRoot() string
+	TransientStore() bool
 	GraphDriverName() string
 	GraphOptions() []string
 	PullOptions() map[string]string
@@ -274,6 +247,8 @@ type Store interface {
 	// Unmount attempts to unmount an image, given an ID.
 	// Returns whether or not the layer is still mounted.
+	// WARNING: The return value may already be obsolete by the time it is available
+	// to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
 	UnmountImage(id string, force bool) (bool, error)
 
 	// Mount attempts to mount a layer, image, or container for access, and
@@ -292,9 +267,15 @@ type Store interface {
 	// Unmount attempts to unmount a layer, image, or container, given an ID, a
 	// name, or a mount path. Returns whether or not the layer is still mounted.
+	// WARNING: The return value may already be obsolete by the time it is available
+	// to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
 	Unmount(id string, force bool) (bool, error)
 
 	// Mounted returns the number of times the layer has been mounted.
+	//
+	// WARNING: This value might already be obsolete by the time it is returned;
+	// in situations where concurrent mount/unmount attempts can happen, this field
+	// should not be used for any decisions, maybe apart from heuristic user warnings.
 	Mounted(id string) (int, error)
 
 	// Changes returns a summary of the changes which would need to be made
@@ -410,7 +391,7 @@ type Store interface {
 	SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
 
 	// ListLayerBigData retrieves a list of the (possibly large) chunks of
-	// named data associated with an layer.
+	// named data associated with a layer.
 	ListLayerBigData(id string) ([]string, error)
 
 	// LayerBigData retrieves a (possibly large) chunk of named data
@@ -531,6 +512,11 @@ type Store interface {
 	// Releasing AdditionalLayer handler is caller's responsibility.
 	// This API is experimental and can be changed without bumping the major version number.
 	LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
+
+	// GarbageCollect tries to clean up remainders of previous containers or layers that are not
+	// referenced in the JSON files. These can happen in the case of unclean
+	// shutdowns or regular restarts in transient store mode.
+	GarbageCollect() error
 }
 
 // AdditionalLayer represents a layer that is contained in the additional layer store
@@ -574,6 +560,8 @@ type LayerOptions struct {
 	// and reliably known by the caller.
 	// Use the default "" if this field is not applicable or the value is not known.
 	UncompressedDigest digest.Digest
+	// True if the layer info can be treated as volatile
+	Volatile bool
 }
 
 // ImageOptions is used for passing options to a Store's CreateImage() method.
@@ -600,29 +588,57 @@ type ContainerOptions struct {
 }
 
 type store struct {
-	lastLoaded      time.Time
-	runRoot         string
-	graphLock       Locker
-	usernsLock      Locker
+	// # Locking hierarchy:
+	// These locks do not all need to be held simultaneously, but if some code does need to lock more than one, it MUST do so in this order:
+	// - graphLock
+	// - layerStore.start{Reading,Writing}
+	// - roLayerStores[].startReading (in the order of the items of the roLayerStores array)
+	// - imageStore.start{Reading,Writing}
+	// - roImageStores[].startReading (in the order of the items of the roImageStores array)
+	// - containerStore.start{Reading,Writing}
+
+	// The following fields are only set when constructing store, and must never be modified afterwards.
+	// They are safe to access without any other locking.
+	runRoot             string
+	graphDriverName     string // Initially set to the user-requested value, possibly ""; updated during store construction, and does not change afterwards.
+	graphDriverPriority []string
+	// graphLock:
+	// - Ensures that we always reload graphDriver, and the primary layer store, after any process does store.Shutdown. This is necessary
+	//   because (??) the Shutdown may forcibly unmount and clean up, affecting graph driver state in a way only a graph driver
+	//   and layer store reinitialization can notice.
+	// - Ensures that store.Shutdown is exclusive with mount operations. This is necessary because some
+	//   graph drivers call mount.MakePrivate() during initialization, the mount operations require that, and the driver’s Cleanup() method
+	//   may undo that. So, holding graphLock is required throughout the duration of Shutdown(), and the duration of any mount
+	//   (but not unmount) calls.
+	// - Within this store object, protects access to some related in-memory state.
+	graphLock  *lockfile.LockFile
+	usernsLock *lockfile.LockFile
 	graphRoot       string
-	graphDriverName string
 	graphOptions    []string
 	pullOptions     map[string]string
 	uidMap          []idtools.IDMap
 	gidMap          []idtools.IDMap
 	autoUsernsUser  string
-	additionalUIDs  *idSet // Set by getAvailableIDs()
-	additionalGIDs  *idSet // Set by getAvailableIDs()
 	autoNsMinSize   uint32
 	autoNsMaxSize   uint32
-	graphDriver     drivers.Driver
-	layerStore      LayerStore
-	roLayerStores   []ROLayerStore
-	imageStore      ImageStore
-	roImageStores   []ROImageStore
-	containerStore  ContainerStore
+	imageStore      rwImageStore
+	roImageStores   []roImageStore
+	containerStore  rwContainerStore
	digestLockRoot  string
 	disableVolatile bool
+	transientStore  bool
+
+	// The following fields can only be accessed with graphLock held.
+	graphLockLastWrite lockfile.LastWrite
+	// FIXME: This field is only set when holding graphLock, but locking rules of the driver
+	// interface itself are not documented here. It is extensively used without holding graphLock.
+	graphDriver             drivers.Driver
+	layerStoreUseGetters    rwLayerStore   // Almost all users should use the provided accessors instead of accessing this field directly.
+	roLayerStoresUseGetters []roLayerStore // Almost all users should use the provided accessors instead of accessing this field directly.
+
+	// FIXME: The following fields need locking, and don’t have it.
+	additionalUIDs *idSet // Set by getAvailableIDs()
+	additionalGIDs *idSet // Set by getAvailableIDs()
 }
 
 // GetStore attempts to find an already-created Store object matching the
@@ -632,16 +648,17 @@ type store struct {
 // If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
 //
 // These defaults observe environment variables:
-// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use
-// * `STORAGE_OPTS` for the string of options to pass to the driver
+//   - `STORAGE_DRIVER` for the name of the storage driver to attempt to use
+//   - `STORAGE_OPTS` for the string of options to pass to the driver
 //
 // Note that we do some of this work in a child process.
 // The calling process's main() function needs to import our pkg/reexec
 // package and should begin with something like this in order to allow us to
 // properly start that child process:
-//	if reexec.Init() {
-//		return
-//	}
+//
+//	if reexec.Init() {
+//		return
+//	}
 func GetStore(options types.StoreOptions) (Store, error) {
 	defaultOpts, err := types.Options()
 	if err != nil {
@@ -692,18 +709,16 @@ func GetStore(options types.StoreOptions) (Store, error) {
 	if err := os.MkdirAll(options.GraphRoot, 0700); err != nil {
 		return nil, err
 	}
-	for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} {
-		if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil {
-			return nil, err
-		}
+	if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0700); err != nil {
+		return nil, err
 	}
-	graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock"))
+	graphLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "storage.lock"))
 	if err != nil {
 		return nil, err
 	}
-	usernsLock, err := GetLockfile(filepath.Join(options.GraphRoot, "userns.lock"))
+	usernsLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "userns.lock"))
 	if err != nil {
 		return nil, err
 	}
@@ -717,21 +732,24 @@ func GetStore(options types.StoreOptions) (Store, error) {
 		autoNsMaxSize = AutoUserNsMaxSize
 	}
 	s := &store{
-		runRoot:         options.RunRoot,
-		graphLock:       graphLock,
-		graphRoot:       options.GraphRoot,
-		graphDriverName: options.GraphDriverName,
-		graphOptions:    options.GraphDriverOptions,
-		uidMap:          copyIDMap(options.UIDMap),
-		gidMap:          copyIDMap(options.GIDMap),
-		autoUsernsUser:  options.RootAutoNsUser,
-		autoNsMinSize:   autoNsMinSize,
-		autoNsMaxSize:   autoNsMaxSize,
-		additionalUIDs:  nil,
-		additionalGIDs:  nil,
-		usernsLock:      usernsLock,
-		disableVolatile: options.DisableVolatile,
-		pullOptions:     options.PullOptions,
+		runRoot:             options.RunRoot,
+		graphDriverName:     options.GraphDriverName,
+		graphDriverPriority: options.GraphDriverPriority,
+		graphLock:           graphLock,
+		usernsLock:          usernsLock,
+		graphRoot:           options.GraphRoot,
+		graphOptions:        options.GraphDriverOptions,
+		pullOptions:         options.PullOptions,
+		uidMap:              copyIDMap(options.UIDMap),
+		gidMap:              copyIDMap(options.GIDMap),
+		autoUsernsUser:      options.RootAutoNsUser,
+		autoNsMinSize:       autoNsMinSize,
+		autoNsMaxSize:       autoNsMaxSize,
+		disableVolatile:     options.DisableVolatile,
+		transientStore:      options.TransientStore,
+
+		additionalUIDs: nil,
+		additionalGIDs: nil,
 	}
 	if err := s.load(); err != nil {
 		return nil, err
@@ -778,6 +796,10 @@ func (s *store) GraphRoot() string {
 	return s.graphRoot
 }
 
+func (s *store) TransientStore() bool {
+	return s.transientStore
+}
+
 func (s *store) GraphOptions() []string {
 	return s.graphOptions
 }
@@ -798,13 +820,27 @@ func (s *store) GIDMap() []idtools.IDMap {
 	return copyIDMap(s.gidMap)
 }
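Editor's sketch, not part of the vendored diff: the GetStore comment above prescribes a reexec.Init() guard at the top of the caller's main(). A minimal, hypothetical consumer, assuming only the vendored APIs visible in this file (storage.GetStore, types.Options, reexec.Init, and the Store accessors added in this diff):

	package main

	import (
		"fmt"

		"github.com/containers/storage"
		"github.com/containers/storage/pkg/reexec"
		"github.com/containers/storage/types"
	)

	func main() {
		// If this process is the library's re-executed child, let it do its
		// work and exit instead of running our own main logic.
		if reexec.Init() {
			return
		}
		opts, err := types.Options() // defaults; honors STORAGE_DRIVER / STORAGE_OPTS
		if err != nil {
			panic(err)
		}
		store, err := storage.GetStore(opts)
		if err != nil {
			panic(err)
		}
		defer store.Shutdown(false)
		fmt.Println("graph root:", store.GraphRoot(), "transient:", store.TransientStore())
	}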
+// This must only be called when constructing store; it writes to fields that are assumed to be constant after construction.
 func (s *store) load() error {
-	driver, err := s.GraphDriver()
-	if err != nil {
+	var driver drivers.Driver
+	if err := func() error { // A scope for defer
+		s.graphLock.Lock()
+		defer s.graphLock.Unlock()
+		lastWrite, err := s.graphLock.GetLastWrite()
+		if err != nil {
+			return err
+		}
+		s.graphLockLastWrite = lastWrite
+		driver, err = s.createGraphDriverLocked()
+		if err != nil {
+			return err
+		}
+		s.graphDriver = driver
+		s.graphDriverName = driver.String()
+		return nil
+	}(); err != nil {
 		return err
 	}
-	s.graphDriver = driver
-	s.graphDriverName = driver.String()
 	driverPrefix := s.graphDriverName + "-"
 
 	gipath := filepath.Join(s.graphRoot, driverPrefix+"images")
@@ -816,22 +852,21 @@ func (s *store) load() error {
 		return err
 	}
 	s.imageStore = ris
-	if _, err := s.ROImageStores(); err != nil {
-		return err
-	}
 
 	gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
 	if err := os.MkdirAll(gcpath, 0700); err != nil {
 		return err
 	}
-	rcs, err := newContainerStore(gcpath)
-	if err != nil {
-		return err
-	}
 	rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
 	if err := os.MkdirAll(rcpath, 0700); err != nil {
 		return err
 	}
+
+	rcs, err := newContainerStore(gcpath, rcpath, s.transientStore)
+	if err != nil {
+		return err
+	}
+
 	s.containerStore = rcs
 
 	for _, store := range driver.AdditionalImageStores() {
@@ -853,57 +888,86 @@ func (s *store) load() error {
 
 // GetDigestLock returns a digest-specific Locker.
 func (s *store) GetDigestLock(d digest.Digest) (Locker, error) {
-	return GetLockfile(filepath.Join(s.digestLockRoot, d.String()))
+	return lockfile.GetLockFile(filepath.Join(s.digestLockRoot, d.String()))
 }
 
-func (s *store) getGraphDriver() (drivers.Driver, error) {
-	if s.graphDriver != nil {
-		return s.graphDriver, nil
+// startUsingGraphDriver obtains s.graphLock and ensures that s.graphDriver is set and fresh.
+// It is only intended to be used on a fully-constructed store.
+// If this succeeds, the caller MUST call stopUsingGraphDriver().
+func (s *store) startUsingGraphDriver() error {
+	s.graphLock.Lock()
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			s.graphLock.Unlock()
+		}
+	}()
+
+	lastWrite, modified, err := s.graphLock.ModifiedSince(s.graphLockLastWrite)
+	if err != nil {
+		return err
 	}
+	if modified {
+		driver, err := s.createGraphDriverLocked()
+		if err != nil {
+			return err
+		}
+		// Our concurrency design requires s.graphDriverName not to be modified after
+		// store is constructed.
+		// It’s fine for driver.String() not to match the requested graph driver name
+		// (e.g. if the user asks for overlay2 and gets overlay), but it must be an idempotent
+		// mapping:
+		//	driver1 := drivers.New(userInput, config)
+		//	name1 := driver1.String()
+		//	name2 := drivers.New(name1, config).String()
+		//	assert(name1 == name2)
+		if s.graphDriverName != driver.String() {
+			return fmt.Errorf("graph driver name changed from %q to %q during reload",
				s.graphDriverName, driver.String())
+		}
+		s.graphDriver = driver
+		s.layerStoreUseGetters = nil
+		s.graphLockLastWrite = lastWrite
+	}
+
+	succeeded = true
+	return nil
+}
+
+// stopUsingGraphDriver releases graphLock obtained by startUsingGraphDriver.
+func (s *store) stopUsingGraphDriver() {
+	s.graphLock.Unlock()
+}
+
+// createGraphDriverLocked creates a new instance of graph driver for s, and returns it.
+// Almost all users should use startUsingGraphDriver instead.
+// The caller must hold s.graphLock.
+func (s *store) createGraphDriverLocked() (drivers.Driver, error) { config := drivers.Options{ - Root: s.graphRoot, - RunRoot: s.runRoot, - DriverOptions: s.graphOptions, - UIDMaps: s.uidMap, - GIDMaps: s.gidMap, + Root: s.graphRoot, + RunRoot: s.runRoot, + DriverPriority: s.graphDriverPriority, + DriverOptions: s.graphOptions, + UIDMaps: s.uidMap, + GIDMaps: s.gidMap, } - driver, err := drivers.New(s.graphDriverName, config) - if err != nil { - return nil, err - } - s.graphDriver = driver - s.graphDriverName = driver.String() - return driver, nil + return drivers.New(s.graphDriverName, config) } func (s *store) GraphDriver() (drivers.Driver, error) { - s.graphLock.Lock() - defer s.graphLock.Unlock() - if s.graphLock.TouchedSince(s.lastLoaded) { - s.graphDriver = nil - s.layerStore = nil - s.lastLoaded = time.Now() + if err := s.startUsingGraphDriver(); err != nil { + return nil, err } - return s.getGraphDriver() + defer s.stopUsingGraphDriver() + return s.graphDriver, nil } -// LayerStore obtains and returns a handle to the writeable layer store object -// used by the Store. Accessing this store directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. -func (s *store) LayerStore() (LayerStore, error) { - s.graphLock.Lock() - defer s.graphLock.Unlock() - if s.graphLock.TouchedSince(s.lastLoaded) { - s.graphDriver = nil - s.layerStore = nil - s.lastLoaded = time.Now() - } - if s.layerStore != nil { - return s.layerStore, nil - } - driver, err := s.getGraphDriver() - if err != nil { - return nil, err +// getLayerStoreLocked obtains and returns a handle to the writeable layer store object +// used by the Store. +// It must be called with s.graphLock held. +func (s *store) getLayerStoreLocked() (rwLayerStore, error) { + if s.layerStoreUseGetters != nil { + return s.layerStoreUseGetters, nil } driverPrefix := s.graphDriverName + "-" rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") @@ -914,76 +978,238 @@ func (s *store) LayerStore() (LayerStore, error) { if err := os.MkdirAll(glpath, 0700); err != nil { return nil, err } - rls, err := s.newLayerStore(rlpath, glpath, driver) + rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore) if err != nil { return nil, err } - s.layerStore = rls - return s.layerStore, nil + s.layerStoreUseGetters = rls + return s.layerStoreUseGetters, nil } -// ROLayerStores obtains additional read/only layer store objects used by the -// Store. Accessing these stores directly will bypass locking and -// synchronization, so it is not part of the exported Store interface. -func (s *store) ROLayerStores() ([]ROLayerStore, error) { - s.graphLock.Lock() - defer s.graphLock.Unlock() - if s.roLayerStores != nil { - return s.roLayerStores, nil - } - driver, err := s.getGraphDriver() - if err != nil { +// getLayerStore obtains and returns a handle to the writeable layer store object +// used by the store. +// It must be called WITHOUT s.graphLock held. +func (s *store) getLayerStore() (rwLayerStore, error) { + if err := s.startUsingGraphDriver(); err != nil { return nil, err } + defer s.stopUsingGraphDriver() + return s.getLayerStoreLocked() +} + +// getROLayerStoresLocked obtains additional read/only layer store objects used by the +// Store. +// It must be called with s.graphLock held. 
+func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) {
+	if s.roLayerStoresUseGetters != nil {
+		return s.roLayerStoresUseGetters, nil
+	}
 	driverPrefix := s.graphDriverName + "-"
 	rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
 	if err := os.MkdirAll(rlpath, 0700); err != nil {
 		return nil, err
 	}
-	for _, store := range driver.AdditionalImageStores() {
+	for _, store := range s.graphDriver.AdditionalImageStores() {
 		glpath := filepath.Join(store, driverPrefix+"layers")
-		rls, err := newROLayerStore(rlpath, glpath, driver)
+		rls, err := newROLayerStore(rlpath, glpath, s.graphDriver)
 		if err != nil {
 			return nil, err
 		}
-		s.roLayerStores = append(s.roLayerStores, rls)
+		s.roLayerStoresUseGetters = append(s.roLayerStoresUseGetters, rls)
+	}
+	return s.roLayerStoresUseGetters, nil
+}
+
+// bothLayerStoreKindsLocked returns the primary, and additional read-only, layer store objects used by the store.
+// It must be called with s.graphLock held.
+func (s *store) bothLayerStoreKindsLocked() (rwLayerStore, []roLayerStore, error) {
+	primary, err := s.getLayerStoreLocked()
+	if err != nil {
+		return nil, nil, fmt.Errorf("loading primary layer store data: %w", err)
+	}
+	additional, err := s.getROLayerStoresLocked()
+	if err != nil {
+		return nil, nil, fmt.Errorf("loading additional layer stores: %w", err)
+	}
+	return primary, additional, nil
+}
+
+// bothLayerStoreKinds returns the primary, and additional read-only, layer store objects used by the store.
+// It must be called WITHOUT s.graphLock held.
+func (s *store) bothLayerStoreKinds() (rwLayerStore, []roLayerStore, error) {
+	if err := s.startUsingGraphDriver(); err != nil {
+		return nil, nil, err
+	}
+	defer s.stopUsingGraphDriver()
+	return s.bothLayerStoreKindsLocked()
+}
+
+// allLayerStoresLocked returns a list of all layer store objects used by the Store.
+// This is a convenience method for read-only users of the Store.
+// It must be called with s.graphLock held.
+func (s *store) allLayerStoresLocked() ([]roLayerStore, error) {
+	primary, additional, err := s.bothLayerStoreKindsLocked()
+	if err != nil {
+		return nil, err
+	}
+	return append([]roLayerStore{primary}, additional...), nil
+}
+
+// allLayerStores returns a list of all layer store objects used by the Store.
+// This is a convenience method for read-only users of the Store.
+// It must be called WITHOUT s.graphLock held.
+func (s *store) allLayerStores() ([]roLayerStore, error) {
+	if err := s.startUsingGraphDriver(); err != nil {
+		return nil, err
+	}
+	defer s.stopUsingGraphDriver()
+	return s.allLayerStoresLocked()
+}
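Editor's sketch, not part of the vendored diff: the readAllLayerStores/readAllImageStores helpers that follow share one subtlety worth spelling out — stopReading is deferred inside the loop, so every read lock acquired so far stays held until the wrapper returns, and the callback decides when to stop. A self-contained toy illustration of that shape (all names hypothetical):

	package main

	import "fmt"

	// toyStore stands in for roLayerStore/roImageStore; only the locking shape matters.
	type toyStore struct{ name string }

	func (t *toyStore) startReading() error { fmt.Println("lock", t.name); return nil }
	func (t *toyStore) stopReading()        { fmt.Println("unlock", t.name) }

	// readAll mirrors readAllLayerStores below: unlocks are deferred, so they
	// run at readAll's return (in reverse order), not per iteration; the
	// callback therefore sees a consistent view of every store visited so far.
	func readAll(stores []*toyStore, fn func(*toyStore) (bool, error)) (bool, error) {
		for _, s := range stores {
			if err := s.startReading(); err != nil {
				return true, err
			}
			defer s.stopReading()
			if done, err := fn(s); done {
				return true, err
			}
		}
		return false, nil
	}

	func main() {
		stores := []*toyStore{{name: "primary"}, {name: "ro-1"}, {name: "ro-2"}}
		found, _ := readAll(stores, func(s *toyStore) (bool, error) {
			return s.name == "ro-1", nil // stop as soon as the item is found
		})
		fmt.Println("found:", found) // the deferred unlocks ran just before this line
	}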
+
+// readAllLayerStores processes allLayerStores() in order:
+// It locks the store for reading, checks for updates, and calls
+//
+//	(done, err) := fn(store)
+//
+// until the callback returns done == true, and returns the data from the callback.
+//
+// If reading any layer store fails, it immediately returns (true, err).
+//
+// If all layer stores are processed without setting done == true, it returns (false, nil).
+//
+// Typical usage:
+//
+//	var res T = failureValue
+//	if done, err := s.readAllLayerStores(func(…) {
+//		…
+//	}); done {
+//		return res, err
+//	}
+func (s *store) readAllLayerStores(fn func(store roLayerStore) (bool, error)) (bool, error) {
+	layerStores, err := s.allLayerStores()
+	if err != nil {
+		return true, err
+	}
+	for _, s := range layerStores {
+		store := s
+		if err := store.startReading(); err != nil {
+			return true, err
+		}
+		defer store.stopReading()
+		if done, err := fn(store); done {
+			return true, err
+		}
+	}
+	return false, nil
+}
+
+// writeToLayerStore is a helper for working with store.getLayerStore():
+// It locks the store for writing, checks for updates, and calls fn().
+// It returns the return value of fn, or its own error initializing the store.
+func (s *store) writeToLayerStore(fn func(store rwLayerStore) error) error {
+	store, err := s.getLayerStore()
+	if err != nil {
+		return err
+	}
+
+	if err := store.startWriting(); err != nil {
+		return err
 	}
-	return s.roLayerStores, nil
+	defer store.stopWriting()
+	return fn(store)
 }
 
-// ImageStore obtains and returns a handle to the writable image store object
-// used by the Store. Accessing this store directly will bypass locking and
-// synchronization, so it is not a part of the exported Store interface.
-func (s *store) ImageStore() (ImageStore, error) {
-	if s.imageStore != nil {
-		return s.imageStore, nil
+// allImageStores returns a list of all image store objects used by the Store.
+// This is a convenience method for read-only users of the Store.
+func (s *store) allImageStores() []roImageStore {
+	return append([]roImageStore{s.imageStore}, s.roImageStores...)
+}
+
+// readAllImageStores processes allImageStores() in order:
+// It locks the store for reading, checks for updates, and calls
+//
+//	(done, err) := fn(store)
+//
+// until the callback returns done == true, and returns the data from the callback.
+//
+// If reading any image store fails, it immediately returns (true, err).
+//
+// If all image stores are processed without setting done == true, it returns (false, nil).
+//
+// Typical usage:
+//
+//	var res T = failureValue
+//	if done, err := s.readAllImageStores(func(…) {
+//		…
+//	}); done {
+//		return res, err
+//	}
+func (s *store) readAllImageStores(fn func(store roImageStore) (bool, error)) (bool, error) {
+	for _, s := range s.allImageStores() {
+		store := s
+		if err := store.startReading(); err != nil {
+			return true, err
+		}
+		defer store.stopReading()
+		if done, err := fn(store); done {
+			return true, err
+		}
 	}
-	return nil, ErrLoadError
+	return false, nil
 }
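Editor's sketch, not part of the vendored diff: the write-side helpers below must respect the lock-ordering rule from the store struct's locking-hierarchy comment (layer store before image store before container store). A toy sketch of the same closure-based discipline (hypothetical types, only the acquisition order is the point):

	package main

	import (
		"fmt"
		"sync"
	)

	// threeStores stands in for the layer/image/container stores.
	type threeStores struct {
		layers, images, containers sync.Mutex
	}

	// writeToAll takes the locks in the documented hierarchy order and
	// releases them in reverse via defer, mirroring writeToAllStores below;
	// as long as every caller uses this order, no two callers can deadlock.
	func (s *threeStores) writeToAll(fn func() error) error {
		s.layers.Lock()
		defer s.layers.Unlock()
		s.images.Lock()
		defer s.images.Unlock()
		s.containers.Lock()
		defer s.containers.Unlock()
		return fn() // runs with all three held
	}

	func main() {
		var s threeStores
		_ = s.writeToAll(func() error {
			fmt.Println("all three stores locked, in hierarchy order")
			return nil
		})
	}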
 
-// ROImageStores obtains additional read/only image store objects used by the
-// Store. Accessing these stores directly will bypass locking and
-// synchronization, so it is not a part of the exported Store interface.
-func (s *store) ROImageStores() ([]ROImageStore, error) {
-	if s.imageStore == nil {
-		return nil, ErrLoadError
+// writeToImageStore is a convenience helper for working with store.imageStore:
+// It locks the store for writing, checks for updates, and calls fn(), which can then access store.imageStore.
+// It returns the return value of fn, or its own error initializing the store.
+func (s *store) writeToImageStore(fn func() error) error {
+	if err := s.imageStore.startWriting(); err != nil {
+		return err
 	}
+	defer s.imageStore.stopWriting()
+	return fn()
+}
-	return s.roImageStores, nil
 
+// writeToContainerStore is a convenience helper for working with store.containerStore:
+// It locks the store for writing, checks for updates, and calls fn(), which can then access store.containerStore.
+// It returns the return value of fn, or its own error initializing the store.
+func (s *store) writeToContainerStore(fn func() error) error {
+	if err := s.containerStore.startWriting(); err != nil {
+		return err
+	}
+	defer s.containerStore.stopWriting()
+	return fn()
 }
 
-// ContainerStore obtains and returns a handle to the container store object
-// used by the Store. Accessing this store directly will bypass locking and
-// synchronization, so it is not a part of the exported Store interface.
-func (s *store) ContainerStore() (ContainerStore, error) {
-	if s.containerStore != nil {
-		return s.containerStore, nil
+// writeToAllStores is a convenience helper for writing to all three stores:
+// It locks the stores for writing, checks for updates, and calls fn(), which can then access the provided layer store,
+// s.imageStore and s.containerStore.
+// It returns the return value of fn, or its own error initializing the stores.
+func (s *store) writeToAllStores(fn func(rlstore rwLayerStore) error) error {
+	rlstore, err := s.getLayerStore()
+	if err != nil {
+		return err
+	}
+
+	if err := rlstore.startWriting(); err != nil {
+		return err
+	}
+	defer rlstore.stopWriting()
+	if err := s.imageStore.startWriting(); err != nil {
+		return err
+	}
+	defer s.imageStore.stopWriting()
+	if err := s.containerStore.startWriting(); err != nil {
 		return err
 	}
-	return nil, ErrLoadError
+	defer s.containerStore.stopWriting()
+
+	return fn(rlstore)
 }
 
-func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
-	if s.graphDriver == nil || !s.graphDriver.SupportsShifting() {
+// canUseShifting returns whether the layer store can use ID-shifting (ID-mapped mounting) for the given ID maps.
+// store must be locked for writing.
+func canUseShifting(store rwLayerStore, uidmap, gidmap []idtools.IDMap) bool {
+	if !store.supportsShifting() {
 		return false
 	}
 	if uidmap != nil && !idtools.IsContiguous(uidmap) {
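Editor's sketch, not part of the vendored diff: the PutLayer hunk that follows is where callers supply the new LayerOptions.Volatile field from this diff. A hedged, caller-side sketch (putVolatileLayer is hypothetical; the durability remark is an assumption based on the transient-store and GarbageCollect comments elsewhere in this diff):

	package example

	import (
		"io"

		"github.com/containers/storage"
	)

	// putVolatileLayer stores a base layer from a tar stream, marking its
	// metadata volatile via the new LayerOptions.Volatile field; volatile
	// layers are assumed here to trade durability across unclean shutdowns
	// for cheaper metadata writes.
	func putVolatileLayer(s storage.Store, diffTar io.Reader) (*storage.Layer, error) {
		layer, _, err := s.PutLayer(
			"",   // id: empty lets the store generate one
			"",   // parent: empty means a base layer
			nil,  // names
			"",   // mountLabel
			true, // writeable
			&storage.LayerOptions{Volatile: true},
			diffTar,
		)
		return layer, err
	}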
@@ -997,28 +1223,18 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
 
 func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) {
 	var parentLayer *Layer
-	rlstore, err := s.LayerStore()
-	if err != nil {
-		return nil, -1, err
-	}
-	rlstores, err := s.ROLayerStores()
+	rlstore, rlstores, err := s.bothLayerStoreKinds()
 	if err != nil {
 		return nil, -1, err
 	}
-	rcstore, err := s.ContainerStore()
-	if err != nil {
-		return nil, -1, err
-	}
-	rlstore.Lock()
-	defer rlstore.Unlock()
-	if err := rlstore.ReloadIfChanged(); err != nil {
+	if err := rlstore.startWriting(); err != nil {
 		return nil, -1, err
 	}
-	rcstore.Lock()
-	defer rcstore.Unlock()
-	if err := rcstore.ReloadIfChanged(); err != nil {
+	defer rlstore.stopWriting()
+	if err := s.containerStore.startWriting(); err != nil {
 		return nil, -1, err
 	}
+	defer s.containerStore.stopWriting()
 	if options == nil {
 		options = &LayerOptions{}
 	}
@@ -1032,14 +1248,13 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
 	gidMap := options.GIDMap
 	if parent != "" {
 		var ilayer *Layer
-		for _, l := range append([]ROLayerStore{rlstore}, rlstores...) {
+		for _, l := range append([]roLayerStore{rlstore}, rlstores...) {
 			lstore := l
 			if lstore != rlstore {
-				lstore.RLock()
-				defer lstore.Unlock()
-				if err := lstore.ReloadIfChanged(); err != nil {
+				if err := lstore.startReading(); err != nil {
 					return nil, -1, err
 				}
+				defer lstore.stopReading()
 			}
 			if l, err := lstore.Get(parent); err == nil && l != nil {
 				ilayer = l
@@ -1051,7 +1266,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
 			return nil, -1, ErrLayerUnknown
 		}
 		parentLayer = ilayer
-		containers, err := rcstore.Containers()
+		containers, err := s.containerStore.Containers()
 		if err != nil {
 			return nil, -1, err
 		}
@@ -1078,7 +1293,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
 		OriginalDigest:     options.OriginalDigest,
 		UncompressedDigest: options.UncompressedDigest,
 	}
-	if s.canUseShifting(uidMap, gidMap) {
+	if canUseShifting(rlstore, uidMap, gidMap) {
 		layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}
 	} else {
 		layerOptions.IDMappingOptions = types.IDMappingOptions{
@@ -1098,27 +1313,17 @@ func (s *store) CreateLayer(id, parent string, names []string, mountLabel string
 
 func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) {
 	if layer != "" {
-		lstore, err := s.LayerStore()
-		if err != nil {
-			return nil, err
-		}
-		lstores, err := s.ROLayerStores()
+		layerStores, err := s.allLayerStores()
 		if err != nil {
 			return nil, err
 		}
 		var ilayer *Layer
-		for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+		for _, s := range layerStores {
 			store := s
-			if store == lstore {
-				store.Lock()
-			} else {
-				store.RLock()
-			}
-			defer store.Unlock()
-			err := store.ReloadIfChanged()
-			if err != nil {
+			if err := store.startReading(); err != nil {
 				return nil, err
 			}
+			defer store.stopReading()
 			ilayer, err = store.Get(layer)
 			if err == nil {
 				break
@@ -1130,28 +1335,30 @@ ...
 		layer = ilayer.ID
 	}
 
-	ristore, err := s.ImageStore()
-	if err != nil {
-		return nil, err
-	}
-	ristore.Lock()
-	defer ristore.Unlock()
-	if err := ristore.ReloadIfChanged(); err != nil {
-		return nil, err
-	}
-
-	creationDate := time.Now().UTC()
-	if options != nil && !options.CreationDate.IsZero() {
-		creationDate = options.CreationDate
-	}
+	var res *Image
+	err := s.writeToImageStore(func() error {
+		creationDate := time.Now().UTC()
+		if options != nil && !options.CreationDate.IsZero() {
+			creationDate = options.CreationDate
+		}
 
-	return ristore.Create(id, names, layer, metadata, creationDate, options.Digest)
+		var err error
+		res, err = s.imageStore.Create(id, names, layer, metadata, creationDate, options.Digest)
+		return err
+	})
+	return res, err
 }
 
-func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, createMappedLayer bool, rlstore LayerStore, lstores []ROLayerStore, options types.IDMappingOptions) (*Layer, error) {
+// imageTopLayerForMapping locates the image's top layer that matches the requested ID mappings,
+// creating and registering an ID-mapped copy of the top layer if no existing layer matches.
+// On entry:
+// - ristore must be locked EITHER for reading or writing
+// - s.imageStore must be locked for writing; it might be identical to ristore.
+// - rlstore must be locked for writing +// - lstores must all be locked for reading +func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlstore rwLayerStore, lstores []roLayerStore, options types.IDMappingOptions) (*Layer, error) { layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool { // If the driver supports shifting and the layer has no mappings, we can use it. - if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + if canUseShifting(rlstore, options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { return true } // If we want host mapping, and the layer uses mappings, it's not the best match. @@ -1165,17 +1372,11 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap) } var layer, parentLayer *Layer - allStores := append([]ROLayerStore{rlstore}, lstores...) + allStores := append([]roLayerStore{rlstore}, lstores...) // Locate the image's top layer and its parent, if it has one. + createMappedLayer := ristore == s.imageStore for _, s := range allStores { store := s - if store != rlstore { - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } - } // Walk the top layer list. for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) { if cLayer, err := store.Get(candidate); err == nil { @@ -1221,44 +1422,41 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea return layer, nil } // The top layer's mappings don't match the ones we want, and it's in an image store - // that lets us edit image metadata... - if istore, ok := ristore.(*imageStore); ok { - // ... so create a duplicate of the layer with the desired mappings, and - // register it as an alternate top layer in the image. - var layerOptions LayerOptions - if s.canUseShifting(options.UIDMap, options.GIDMap) { - layerOptions = LayerOptions{ - IDMappingOptions: types.IDMappingOptions{ - HostUIDMapping: true, - HostGIDMapping: true, - UIDMap: nil, - GIDMap: nil, - }, - } - } else { - layerOptions = LayerOptions{ - IDMappingOptions: types.IDMappingOptions{ - HostUIDMapping: options.HostUIDMapping, - HostGIDMapping: options.HostGIDMapping, - UIDMap: copyIDMap(options.UIDMap), - GIDMap: copyIDMap(options.GIDMap), - }, - } + // that lets us edit image metadata, so create a duplicate of the layer with the desired + // mappings, and register it as an alternate top layer in the image. 
+ var layerOptions LayerOptions + if canUseShifting(rlstore, options.UIDMap, options.GIDMap) { + layerOptions = LayerOptions{ + IDMappingOptions: types.IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + UIDMap: nil, + GIDMap: nil, + }, } - layerOptions.TemplateLayer = layer.ID - mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) - if err != nil { - return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err) + } else { + layerOptions = LayerOptions{ + IDMappingOptions: types.IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + }, } - if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { - if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { - err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err) - } - return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err) + } + layerOptions.TemplateLayer = layer.ID + mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) + if err != nil { + return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err) + } + // By construction, createMappedLayer can only be true if ristore == s.imageStore. + if err = s.imageStore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { + if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { + err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err) } - layer = mappedLayer + return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err) } - return layer, nil + return mappedLayer, nil } func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { @@ -1271,7 +1469,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if options.HostGIDMapping { options.GIDMap = nil } - rlstore, err := s.LayerStore() + rlstore, lstores, err := s.bothLayerStoreKinds() // lstores will be locked read-only if image != "" if err != nil { return nil, err } @@ -1288,45 +1486,41 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat defer s.usernsLock.Unlock() } - var imageHomeStore ROImageStore - var istore ImageStore - var istores []ROImageStore - var lstores []ROLayerStore - var cimage *Image + var imageHomeStore roImageStore // Set if image != "" + // s.imageStore is locked read-write, if image != "" + // s.roImageStores are NOT NECESSARILY ALL locked read-only if image != "" + var cimage *Image // Set if image != "" if image != "" { - var err error - lstores, err = s.ROLayerStores() - if err != nil { - return nil, err - } - istore, err = s.ImageStore() - if err != nil { + if err := rlstore.startWriting(); err != nil { return nil, err } - istores, err = s.ROImageStores() - if err != nil { - return nil, err - } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) 
{ + defer rlstore.stopWriting() + for _, s := range lstores { store := s - if store == istore { - store.Lock() - } else { - store.RLock() - } - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { + if err := store.startReading(); err != nil { return nil, err } - cimage, err = store.Get(image) - if err == nil { - imageHomeStore = store - break + defer store.stopReading() + } + if err := s.imageStore.startWriting(); err != nil { + return nil, err + } + defer s.imageStore.stopWriting() + cimage, err = s.imageStore.Get(image) + if err == nil { + imageHomeStore = s.imageStore + } else { + for _, s := range s.roImageStores { + store := s + if err := store.startReading(); err != nil { + return nil, err + } + defer store.stopReading() + cimage, err = store.Get(image) + if err == nil { + imageHomeStore = store + break + } } } if cimage == nil { @@ -1337,7 +1531,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if options.AutoUserNs { var err error - options.UIDMap, options.GIDMap, err = s.getAutoUserNS(&options.AutoUserNsOpts, cimage) + options.UIDMap, options.GIDMap, err = s.getAutoUserNS(&options.AutoUserNsOpts, cimage, rlstore, lstores) if err != nil { return nil, err } @@ -1349,8 +1543,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat idMappingsOptions := options.IDMappingOptions if image != "" { if cimage.TopLayer != "" { - createMappedLayer := imageHomeStore == istore - ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions) + ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, rlstore, lstores, idMappingsOptions) if err != nil { return nil, err } @@ -1364,11 +1557,10 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat } } } else { - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + if err := rlstore.startWriting(); err != nil { return nil, err } + defer rlstore.stopWriting() if !options.HostUIDMapping && len(options.UIDMap) == 0 { uidMap = s.uidMap } @@ -1376,34 +1568,36 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat gidMap = s.gidMap } } - var layerOptions *LayerOptions - if s.canUseShifting(uidMap, gidMap) { - layerOptions = &LayerOptions{ - IDMappingOptions: types.IDMappingOptions{ + layerOptions := &LayerOptions{ + // Normally layers for containers are volatile only if the container is. + // But in transient store mode, all container layers are volatile. 
+ Volatile: options.Volatile || s.transientStore, + } + if canUseShifting(rlstore, uidMap, gidMap) { + layerOptions.IDMappingOptions = + types.IDMappingOptions{ HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil, - }, - } + } } else { - layerOptions = &LayerOptions{ - IDMappingOptions: types.IDMappingOptions{ + layerOptions.IDMappingOptions = + types.IDMappingOptions{ HostUIDMapping: idMappingsOptions.HostUIDMapping, HostGIDMapping: idMappingsOptions.HostGIDMapping, UIDMap: copyIDMap(uidMap), GIDMap: copyIDMap(gidMap), - }, - } + } + } if options.Flags == nil { options.Flags = make(map[string]interface{}) } - plabel, _ := options.Flags["ProcessLabel"].(string) - mlabel, _ := options.Flags["MountLabel"].(string) - if (plabel == "" && mlabel != "") || - (plabel != "" && mlabel == "") { - return nil, errors.New("processLabel and Mountlabel must either not be specified or both specified") + plabel, _ := options.Flags[processLabelFlag].(string) + mlabel, _ := options.Flags[mountLabelFlag].(string) + if (plabel == "" && mlabel != "") || (plabel != "" && mlabel == "") { + return nil, errors.New("ProcessLabel and Mountlabel must either not be specified or both specified") } if plabel == "" { @@ -1411,232 +1605,156 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if err != nil { return nil, err } - options.Flags["ProcessLabel"] = processLabel - options.Flags["MountLabel"] = mountLabel + mlabel = mountLabel + options.Flags[processLabelFlag] = processLabel + options.Flags[mountLabelFlag] = mountLabel } - clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), options.StorageOpt, layerOptions, true) + clayer, err := rlstore.Create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true) if err != nil { return nil, err } layer = clayer.ID - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return nil, err - } - options.IDMappingOptions = types.IDMappingOptions{ - HostUIDMapping: len(options.UIDMap) == 0, - HostGIDMapping: len(options.GIDMap) == 0, - UIDMap: copyIDMap(options.UIDMap), - GIDMap: copyIDMap(options.GIDMap), - } - container, err := rcstore.Create(id, names, imageID, layer, metadata, options) - if err != nil || container == nil { - rlstore.Delete(layer) + + // Normally only `--rm` containers are volatile, but in transient store mode all containers are volatile + if s.transientStore { + options.Volatile = true } + + var container *Container + err = s.writeToContainerStore(func() error { + options.IDMappingOptions = types.IDMappingOptions{ + HostUIDMapping: len(options.UIDMap) == 0, + HostGIDMapping: len(options.GIDMap) == 0, + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + } + var err error + container, err = s.containerStore.Create(id, names, imageID, layer, metadata, options) + if err != nil || container == nil { + if err2 := rlstore.Delete(layer); err2 != nil { + if err == nil { + err = fmt.Errorf("deleting layer %#v: %w", layer, err2) + } else { + logrus.Errorf("While recovering from a failure to create a container, error deleting layer %#v: %v", layer, err2) + } + } + } + return err + }) return container, err } func (s *store) SetMetadata(id, metadata string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := 
s.ContainerStore() - if err != nil { - return err - } + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if rlstore.Exists(id) { + return rlstore.SetMetadata(id, metadata) + } + if s.imageStore.Exists(id) { + return s.imageStore.SetMetadata(id, metadata) + } + if s.containerStore.Exists(id) { + return s.containerStore.SetMetadata(id, metadata) + } + return ErrNotAnID + }) +} - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err +func (s *store) Metadata(id string) (string, error) { + var res string + + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if store.Exists(id) { + var err error + res, err = store.Metadata(id) + return true, err + } + return false, nil + }); done { + return res, err } - if rlstore.Exists(id) { - return rlstore.SetMetadata(id, metadata) + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if store.Exists(id) { + var err error + res, err = store.Metadata(id) + return true, err + } + return false, nil + }); done { + return res, err } - if ristore.Exists(id) { - return ristore.SetMetadata(id, metadata) + + if err := s.containerStore.startReading(); err != nil { + return "", err } - if rcstore.Exists(id) { - return rcstore.SetMetadata(id, metadata) + defer s.containerStore.stopReading() + if s.containerStore.Exists(id) { + return s.containerStore.Metadata(id) } - return ErrNotAnID -} - -func (s *store) Metadata(id string) (string, error) { - lstore, err := s.LayerStore() - if err != nil { - return "", err - } - lstores, err := s.ROLayerStores() - if err != nil { - return "", err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return "", err - } - if store.Exists(id) { - return store.Metadata(id) - } - } - - istore, err := s.ImageStore() - if err != nil { - return "", err - } - istores, err := s.ROImageStores() - if err != nil { - return "", err - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return "", err - } - if store.Exists(id) { - return store.Metadata(id) - } - } - - cstore, err := s.ContainerStore() - if err != nil { - return "", err - } - cstore.RLock() - defer cstore.Unlock() - if err := cstore.ReloadIfChanged(); err != nil { - return "", err - } - if cstore.Exists(id) { - return cstore.Metadata(id) - } - return "", ErrNotAnID + return "", ErrNotAnID } func (s *store) ListImageBigData(id string) ([]string, error) { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) 
{ - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res []string + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { bigDataNames, err := store.BigDataNames(id) if err == nil { - return bigDataNames, err + res = bigDataNames + return true, nil } + return false, nil + }); done { + return res, err } return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (s *store) ImageBigDataSize(id, key string) (int64, error) { - istore, err := s.ImageStore() - if err != nil { - return -1, err - } - istores, err := s.ROImageStores() - if err != nil { - return -1, err - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return -1, err - } + var res int64 = -1 + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { size, err := store.BigDataSize(id, key) if err == nil { - return size, nil + res = size + return true, nil } + return false, nil + }); done { + return res, err } return -1, ErrSizeUnknown } func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { - ristore, err := s.ImageStore() - if err != nil { - return "", err - } - stores, err := s.ROImageStores() - if err != nil { - return "", err - } - stores = append([]ROImageStore{ristore}, stores...) - for _, r := range stores { - ristore := r - ristore.RLock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return "", err - } + var res digest.Digest + if done, err := s.readAllImageStores(func(ristore roImageStore) (bool, error) { d, err := ristore.BigDataDigest(id, key) if err == nil && d.Validate() == nil { - return d, nil + res = d + return true, nil } + return false, nil + }); done { + return res, err } return "", ErrDigestUnknown } func (s *store) ImageBigData(id, key string) ([]byte, error) { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } foundImage := false - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res []byte + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { data, err := store.BigData(id, key) if err == nil { - return data, nil + res = data + return true, nil } if store.Exists(id) { foundImage = true } + return false, nil + }); done { + return res, err } if foundImage { return nil, fmt.Errorf("locating item named %q for image with ID %q (consider removing the image to resolve the issue): %w", key, id, os.ErrNotExist) @@ -1647,29 +1765,20 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) { // ListLayerBigData retrieves a list of the (possibly large) chunks of // named data associated with an layer. func (s *store) ListLayerBigData(id string) ([]string, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } foundLayer := false - for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res []string + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { data, err := store.BigDataNames(id) if err == nil { - return data, nil + res = data + return true, nil } if store.Exists(id) { foundLayer = true } + return false, nil + }); done { + return res, err } if foundLayer { return nil, fmt.Errorf("locating big data for layer with ID %q: %w", id, os.ErrNotExist) @@ -1680,29 +1789,20 @@ func (s *store) ListLayerBigData(id string) ([]string, error) { // LayerBigData retrieves a (possibly large) chunk of named data // associated with a layer. func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } foundLayer := false - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res io.ReadCloser + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { data, err := store.BigData(id, key) if err == nil { - return data, nil + res = data + return true, nil } if store.Exists(id) { foundLayer = true } + return false, nil + }); done { + return res, err } if foundLayer { return nil, fmt.Errorf("locating item named %q for layer with ID %q: %w", key, id, os.ErrNotExist) @@ -1713,72 +1813,39 @@ func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) { // SetLayerBigData stores a (possibly large) chunk of named data // associated with a layer. func (s *store) SetLayerBigData(id, key string, data io.Reader) error { - store, err := s.LayerStore() - if err != nil { - return err - } - - store.Lock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return err - } - return store.SetBigData(id, key, data) + return s.writeToLayerStore(func(store rwLayerStore) error { + return store.SetBigData(id, key, data) + }) } func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { - ristore, err := s.ImageStore() - if err != nil { - return err - } - - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - - return ristore.SetBigData(id, key, data, digestManifest) + return s.writeToImageStore(func() error { + return s.imageStore.SetBigData(id, key, data, digestManifest) + }) } func (s *store) ImageSize(id string) (int64, error) { - var image *Image - - lstore, err := s.LayerStore() + layerStores, err := s.allLayerStores() if err != nil { - return -1, fmt.Errorf("loading primary layer store data: %w", err) - } - lstores, err := s.ROLayerStores() - if err != nil { - return -1, fmt.Errorf("loading additional layer stores: %w", err) + return -1, err } - for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ + for _, s := range layerStores { store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { + if err := store.startReading(); err != nil { return -1, err } - } - - var imageStore ROBigDataStore - istore, err := s.ImageStore() - if err != nil { - return -1, fmt.Errorf("loading primary image store data: %w", err) - } - istores, err := s.ROImageStores() - if err != nil { - return -1, fmt.Errorf("loading additional image stores: %w", err) + defer store.stopReading() } // Look for the image's record. - for _, s := range append([]ROImageStore{istore}, istores...) { + var imageStore roBigDataStore + var image *Image + for _, s := range s.allImageStores() { store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { + if err := store.startReading(); err != nil { return -1, err } + defer store.stopReading() if image, err = store.Get(id); err == nil { imageStore = store break @@ -1806,9 +1873,9 @@ func (s *store) ImageSize(id string) (int64, error) { } visited[layerID] = struct{}{} // Look for the layer and the store that knows about it. - var layerStore ROLayerStore + var layerStore roLayerStore var layer *Layer - for _, store := range append([]ROLayerStore{lstore}, lstores...) { + for _, store := range layerStores { if layer, err = store.Get(layerID); err == nil { layerStore = store break @@ -1852,21 +1919,16 @@ func (s *store) ImageSize(id string) (int64, error) { } func (s *store) ContainerSize(id string) (int64, error) { - lstore, err := s.LayerStore() - if err != nil { - return -1, err - } - lstores, err := s.ROLayerStores() + layerStores, err := s.allLayerStores() if err != nil { return -1, err } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { + for _, s := range layerStores { store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { + if err := store.startReading(); err != nil { return -1, err } + defer store.stopReading() } // Get the location of the container directory and container run directory. @@ -1880,188 +1942,132 @@ func (s *store) ContainerSize(id string) (int64, error) { return -1, err } - rcstore, err := s.ContainerStore() - if err != nil { - return -1, err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return -1, err - } + var res int64 = -1 + err = s.writeToContainerStore(func() error { // Yes, s.containerStore.BigDataSize requires a write lock. + // Read the container record. + container, err := s.containerStore.Get(id) + if err != nil { + return err + } - // Read the container record. - container, err := rcstore.Get(id) - if err != nil { - return -1, err - } + // Read the container's layer's size. + var layer *Layer + var size int64 + for _, store := range layerStores { + if layer, err = store.Get(container.LayerID); err == nil { + size, err = store.DiffSize("", layer.ID) + if err != nil { + return fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err) + } + break + } + } + if layer == nil { + return fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown) + } - // Read the container's layer's size. - var layer *Layer - var size int64 - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - if layer, err = store.Get(container.LayerID); err == nil { - size, err = store.DiffSize("", layer.ID) + // Count big data items. 
+ names, err := s.containerStore.BigDataNames(id) + if err != nil { + return fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err) + } + for _, name := range names { + n, err := s.containerStore.BigDataSize(id, name) if err != nil { - return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err) + return fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err) } - break + size += n } - } - if layer == nil { - return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown) - } - // Count big data items. - names, err := rcstore.BigDataNames(id) - if err != nil { - return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err) - } - for _, name := range names { - n, err := rcstore.BigDataSize(id, name) + // Count the size of our container directory and container run directory. + n, err := directory.Size(cdir) if err != nil { - return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err) + return err + } + size += n + n, err = directory.Size(rdir) + if err != nil { + return err } size += n - } - - // Count the size of our container directory and container run directory. - n, err := directory.Size(cdir) - if err != nil { - return -1, err - } - size += n - n, err = directory.Size(rdir) - if err != nil { - return -1, err - } - size += n - return size, nil + res = size + return nil + }) + return res, err } func (s *store) ListContainerBigData(id string) ([]string, error) { - rcstore, err := s.ContainerStore() - if err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } + defer s.containerStore.stopReading() - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return nil, err - } - - return rcstore.BigDataNames(id) + return s.containerStore.BigDataNames(id) } func (s *store) ContainerBigDataSize(id, key string) (int64, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return -1, err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return -1, err - } - return rcstore.BigDataSize(id, key) + var res int64 = -1 + err := s.writeToContainerStore(func() error { // Yes, BigDataSize requires a write lock. + var err error + res, err = s.containerStore.BigDataSize(id, key) + return err + }) + return res, err } func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return "", err - } - return rcstore.BigDataDigest(id, key) + var res digest.Digest + err := s.writeToContainerStore(func() error { // Yes, BigDataDigest requires a write lock. 
+ var err error + res, err = s.containerStore.BigDataDigest(id, key) + return err + }) + return res, err } func (s *store) ContainerBigData(id, key string) ([]byte, error) { - rcstore, err := s.ContainerStore() - if err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return nil, err - } - return rcstore.BigData(id, key) + defer s.containerStore.stopReading() + return s.containerStore.BigData(id, key) } func (s *store) SetContainerBigData(id, key string, data []byte) error { - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } - return rcstore.SetBigData(id, key, data) + return s.writeToContainerStore(func() error { + return s.containerStore.SetBigData(id, key, data) + }) } func (s *store) Exists(id string) bool { - lstore, err := s.LayerStore() - if err != nil { - return false - } - lstores, err := s.ROLayerStores() - if err != nil { - return false - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return false - } + var res = false + + if done, _ := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if store.Exists(id) { - return true + res = true + return true, nil } + return false, nil + }); done { + return res } - istore, err := s.ImageStore() - if err != nil { - return false - } - istores, err := s.ROImageStores() - if err != nil { - return false - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return false - } + if done, _ := s.readAllImageStores(func(store roImageStore) (bool, error) { if store.Exists(id) { - return true + res = true + return true, nil } + return false, nil + }); done { + return res } - rcstore, err := s.ContainerStore() - if err != nil { - return false - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return false } - if rcstore.Exists(id) { - return true - } - - return false + defer s.containerStore.stopReading() + return s.containerStore.Exists(id) } func dedupeNames(names []string) []string { @@ -2092,206 +2098,117 @@ func (s *store) RemoveNames(id string, names []string) error { func (s *store) updateNames(id string, names []string, op updateNameOperation) error { deduped := dedupeNames(names) - rlstore, err := s.LayerStore() - if err != nil { - return err - } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - if rlstore.Exists(id) { - switch op { - case setNames: - return rlstore.SetNames(id, deduped) - case removeNames: - return rlstore.RemoveNames(id, deduped) - case addNames: - return rlstore.AddNames(id, deduped) - default: - return errInvalidUpdateNameOperation + layerFound := false + if err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if !rlstore.Exists(id) { + return nil } - } - - ristore, err := s.ImageStore() - if err != nil { + layerFound = true + return rlstore.updateNames(id, deduped, op) + }); err != nil || layerFound { return err } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { + + if err := 
s.imageStore.startWriting(); err != nil { return err } - if ristore.Exists(id) { - switch op { - case setNames: - return ristore.SetNames(id, deduped) - case removeNames: - return ristore.RemoveNames(id, deduped) - case addNames: - return ristore.AddNames(id, deduped) - default: - return errInvalidUpdateNameOperation - } + defer s.imageStore.stopWriting() + if s.imageStore.Exists(id) { + return s.imageStore.updateNames(id, deduped, op) } // Check is id refers to a RO Store - ristores, err := s.ROImageStores() - if err != nil { - return err - } - for _, s := range ristores { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { + for _, is := range s.roImageStores { + store := is + if err := store.startReading(); err != nil { return err } + defer store.stopReading() if i, err := store.Get(id); err == nil { if len(deduped) > 1 { // Do not want to create image name in R/W storage deduped = deduped[1:] } - _, err := ristore.Create(id, deduped, i.TopLayer, i.Metadata, i.Created, i.Digest) - if err == nil { - return ristore.Save() - } + _, err := s.imageStore.Create(id, deduped, i.TopLayer, i.Metadata, i.Created, i.Digest) return err } } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } - if rcstore.Exists(id) { - switch op { - case setNames: - return rcstore.SetNames(id, deduped) - case removeNames: - return rcstore.RemoveNames(id, deduped) - case addNames: - return rcstore.AddNames(id, deduped) - default: - return errInvalidUpdateNameOperation + containerFound := false + if err := s.writeToContainerStore(func() error { + if !s.containerStore.Exists(id) { + return nil } + containerFound = true + return s.containerStore.updateNames(id, deduped, op) + }); err != nil || containerFound { + return err } + return ErrLayerUnknown } func (s *store) Names(id string) ([]string, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res []string + + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if l, err := store.Get(id); l != nil && err == nil { - return l.Names, nil + res = l.Names + return true, nil } + return false, nil + }); done { + return res, err } - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) 
{ - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { if i, err := store.Get(id); i != nil && err == nil { - return i.Names, nil + res = i.Names + return true, nil } + return false, nil + }); done { + return res, err } - rcstore, err := s.ContainerStore() - if err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return nil, err - } - if c, err := rcstore.Get(id); c != nil && err == nil { + defer s.containerStore.stopReading() + if c, err := s.containerStore.Get(id); c != nil && err == nil { return c.Names, nil } return nil, ErrLayerUnknown } func (s *store) Lookup(name string) (string, error) { - lstore, err := s.LayerStore() - if err != nil { - return "", err - } - lstores, err := s.ROLayerStores() - if err != nil { - return "", err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return "", err - } + var res string + + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if l, err := store.Get(name); l != nil && err == nil { - return l.ID, nil + res = l.ID + return true, nil } + return false, nil + }); done { + return res, err } - istore, err := s.ImageStore() - if err != nil { - return "", err - } - istores, err := s.ROImageStores() - if err != nil { - return "", err - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return "", err - } + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { if i, err := store.Get(name); i != nil && err == nil { - return i.ID, nil + res = i.ID + return true, nil } + return false, nil + }); done { + return res, err } - cstore, err := s.ContainerStore() - if err != nil { + if err := s.containerStore.startReading(); err != nil { return "", err } - cstore.RLock() - defer cstore.Unlock() - if err := cstore.ReloadIfChanged(); err != nil { - return "", err - } - if c, err := cstore.Get(name); c != nil && err == nil { + defer s.containerStore.stopReading() + if c, err := s.containerStore.Get(name); c != nil && err == nil { return c.ID, nil } @@ -2299,417 +2216,280 @@ func (s *store) Lookup(name string) (string, error) { } func (s *store) DeleteLayer(id string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } - - if rlstore.Exists(id) { - if l, err := rlstore.Get(id); err != nil { - id = l.ID - } - layers, err := rlstore.Layers() - if err != nil { - return err - } - for _, layer := range layers { - if layer.Parent == id { - return fmt.Errorf("used by layer %v: %w", layer.ID, ErrLayerHasChildren) + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if rlstore.Exists(id) { + if l, err := 
rlstore.Get(id); err != nil { + id = l.ID } - } - images, err := ristore.Images() - if err != nil { - return err - } - - for _, image := range images { - if image.TopLayer == id { - return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage) + layers, err := rlstore.Layers() + if err != nil { + return err + } + for _, layer := range layers { + if layer.Parent == id { + return fmt.Errorf("used by layer %v: %w", layer.ID, ErrLayerHasChildren) + } + } + images, err := s.imageStore.Images() + if err != nil { + return err } - if stringutils.InSlice(image.MappedTopLayers, id) { - // No write access to the image store, fail before the layer is deleted - if _, ok := ristore.(*imageStore); !ok { + + for _, image := range images { + if image.TopLayer == id { return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage) } } - } - containers, err := rcstore.Containers() - if err != nil { - return err - } - for _, container := range containers { - if container.LayerID == id { - return fmt.Errorf("layer %v used by container %v: %w", id, container.ID, ErrLayerUsedByContainer) + containers, err := s.containerStore.Containers() + if err != nil { + return err + } + for _, container := range containers { + if container.LayerID == id { + return fmt.Errorf("layer %v used by container %v: %w", id, container.ID, ErrLayerUsedByContainer) + } + } + if err := rlstore.Delete(id); err != nil { + return fmt.Errorf("delete layer %v: %w", id, err) } - } - if err := rlstore.Delete(id); err != nil { - return fmt.Errorf("delete layer %v: %w", id, err) - } - // The check here is used to avoid iterating the images if we don't need to. - // There is already a check above for the imageStore to be writeable when the layer is part of MappedTopLayers. 
- if istore, ok := ristore.(*imageStore); ok { for _, image := range images { if stringutils.InSlice(image.MappedTopLayers, id) { - if err = istore.removeMappedTopLayer(image.ID, id); err != nil { + if err = s.imageStore.removeMappedTopLayer(image.ID, id); err != nil { return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err) } } } + return nil } - return nil - } - return ErrNotALayer + return ErrNotALayer + }) } func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return nil, err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return nil, err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return nil, err - } layersToRemove := []string{} - if ristore.Exists(id) { - image, err := ristore.Get(id) - if err != nil { - return nil, err - } - id = image.ID - containers, err := rcstore.Containers() - if err != nil { - return nil, err - } - aContainerByImage := make(map[string]string) - for _, container := range containers { - aContainerByImage[container.ImageID] = container.ID - } - if container, ok := aContainerByImage[id]; ok { - return nil, fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer) - } - images, err := ristore.Images() - if err != nil { - return nil, err - } - layers, err := rlstore.Layers() - if err != nil { - return nil, err - } - childrenByParent := make(map[string][]string) - for _, layer := range layers { - childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID) - } - otherImagesTopLayers := make(map[string]struct{}) - for _, img := range images { - if img.ID != id { - otherImagesTopLayers[img.TopLayer] = struct{}{} - for _, layerID := range img.MappedTopLayers { - otherImagesTopLayers[layerID] = struct{}{} - } + if err := s.writeToAllStores(func(rlstore rwLayerStore) error { + if s.imageStore.Exists(id) { + image, err := s.imageStore.Get(id) + if err != nil { + return err } - } - if commit { - if err = ristore.Delete(id); err != nil { - return nil, err + id = image.ID + containers, err := s.containerStore.Containers() + if err != nil { + return err } - } - layer := image.TopLayer - layersToRemoveMap := make(map[string]struct{}) - layersToRemove = append(layersToRemove, image.MappedTopLayers...) 
- for _, mappedTopLayer := range image.MappedTopLayers { - layersToRemoveMap[mappedTopLayer] = struct{}{} - } - for layer != "" { - if rcstore.Exists(layer) { - break + aContainerByImage := make(map[string]string) + for _, container := range containers { + aContainerByImage[container.ImageID] = container.ID } - if _, used := otherImagesTopLayers[layer]; used { - break + if container, ok := aContainerByImage[id]; ok { + return fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer) + } + images, err := s.imageStore.Images() + if err != nil { + return err + } + layers, err := rlstore.Layers() + if err != nil { + return err } - parent := "" - if l, err := rlstore.Get(layer); err == nil { - parent = l.Parent + childrenByParent := make(map[string][]string) + for _, layer := range layers { + childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID) } - hasChildrenNotBeingRemoved := func() bool { - layersToCheck := []string{layer} - if layer == image.TopLayer { - layersToCheck = append(layersToCheck, image.MappedTopLayers...) + otherImagesTopLayers := make(map[string]struct{}) + for _, img := range images { + if img.ID != id { + otherImagesTopLayers[img.TopLayer] = struct{}{} + for _, layerID := range img.MappedTopLayers { + otherImagesTopLayers[layerID] = struct{}{} + } + } + } + if commit { + if err = s.imageStore.Delete(id); err != nil { + return err + } + } + layer := image.TopLayer + layersToRemoveMap := make(map[string]struct{}) + layersToRemove = append(layersToRemove, image.MappedTopLayers...) + for _, mappedTopLayer := range image.MappedTopLayers { + layersToRemoveMap[mappedTopLayer] = struct{}{} + } + for layer != "" { + if s.containerStore.Exists(layer) { + break } - for _, layer := range layersToCheck { - if childList := childrenByParent[layer]; len(childList) > 0 { - for _, child := range childList { - if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval { - continue + if _, used := otherImagesTopLayers[layer]; used { + break + } + parent := "" + if l, err := rlstore.Get(layer); err == nil { + parent = l.Parent + } + hasChildrenNotBeingRemoved := func() bool { + layersToCheck := []string{layer} + if layer == image.TopLayer { + layersToCheck = append(layersToCheck, image.MappedTopLayers...) 
+ } + for _, layer := range layersToCheck { + if childList := childrenByParent[layer]; len(childList) > 0 { + for _, child := range childList { + if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval { + continue + } + return true } - return true } } + return false } - return false - } - if hasChildrenNotBeingRemoved() { - break + if hasChildrenNotBeingRemoved() { + break + } + layersToRemove = append(layersToRemove, layer) + layersToRemoveMap[layer] = struct{}{} + layer = parent } - layersToRemove = append(layersToRemove, layer) - layersToRemoveMap[layer] = struct{}{} - layer = parent + } else { + return ErrNotAnImage } - } else { - return nil, ErrNotAnImage - } - if commit { - for _, layer := range layersToRemove { - if err = rlstore.Delete(layer); err != nil { - return nil, err + if commit { + for _, layer := range layersToRemove { + if err = rlstore.Delete(layer); err != nil { + return err + } } } + return nil + }); err != nil { + return nil, err } return layersToRemove, nil } func (s *store) DeleteContainer(id string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if !s.containerStore.Exists(id) { + return ErrNotAContainer + } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } + container, err := s.containerStore.Get(id) + if err != nil { + return ErrNotAContainer + } - if rcstore.Exists(id) { - if container, err := rcstore.Get(id); err == nil { - errChan := make(chan error) - var wg sync.WaitGroup + errChan := make(chan error) + var wg sync.WaitGroup - if rlstore.Exists(container.LayerID) { - wg.Add(1) - go func() { - errChan <- rlstore.Delete(container.LayerID) - wg.Done() - }() - } + if rlstore.Exists(container.LayerID) { wg.Add(1) go func() { - errChan <- rcstore.Delete(id) + errChan <- rlstore.Delete(container.LayerID) wg.Done() }() + } + wg.Add(1) + go func() { + errChan <- s.containerStore.Delete(id) + wg.Done() + }() + + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) + wg.Add(1) + go func() { + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(gcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(gcpath) + }() + + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) + wg.Add(1) + go func() { + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(rcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(rcpath) + }() - middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) - wg.Add(1) - go func() { - defer wg.Done() - // attempt a simple rm -rf first - err := os.RemoveAll(gcpath) - if err == nil { - errChan <- nil - return - } - // and if it fails get to the more complicated cleanup - errChan <- system.EnsureRemoveAll(gcpath) - }() - - rcpath := filepath.Join(s.RunRoot(), middleDir, 
container.ID) - wg.Add(1) - go func() { - defer wg.Done() - // attempt a simple rm -rf first - err := os.RemoveAll(rcpath) - if err == nil { - errChan <- nil - return - } - // and if it fails get to the more complicated cleanup - errChan <- system.EnsureRemoveAll(rcpath) - }() - - go func() { - wg.Wait() - close(errChan) - }() + go func() { + wg.Wait() + close(errChan) + }() - var errors []error - for err := range errChan { - if err != nil { - errors = append(errors, err) - } + var errors []error + for err := range errChan { + if err != nil { + errors = append(errors, err) } - return multierror.Append(nil, errors...).ErrorOrNil() } - } - return ErrNotAContainer + return multierror.Append(nil, errors...).ErrorOrNil() + }) } func (s *store) Delete(id string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } - - if rcstore.Exists(id) { - if container, err := rcstore.Get(id); err == nil { - if rlstore.Exists(container.LayerID) { - if err = rlstore.Delete(container.LayerID); err != nil { - return err - } - if err = rcstore.Delete(id); err != nil { - return err - } - middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata") - if err = os.RemoveAll(gcpath); err != nil { - return err - } - rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata") - if err = os.RemoveAll(rcpath); err != nil { - return err + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if s.containerStore.Exists(id) { + if container, err := s.containerStore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + if err = s.containerStore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil } - return nil + return ErrNotALayer } - return ErrNotALayer } - } - if ristore.Exists(id) { - return ristore.Delete(id) - } - if rlstore.Exists(id) { - return rlstore.Delete(id) - } - return ErrLayerUnknown -} - -func (s *store) Wipe() error { - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rlstore, err := s.LayerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } + if s.imageStore.Exists(id) { + return s.imageStore.Delete(id) + } + if rlstore.Exists(id) { + return rlstore.Delete(id) + } + return ErrLayerUnknown + }) +} - if err = 
rcstore.Wipe(); err != nil { - return err - } - if err = ristore.Wipe(); err != nil { - return err - } - return rlstore.Wipe() +func (s *store) Wipe() error { + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if err := s.containerStore.Wipe(); err != nil { + return err + } + if err := s.imageStore.Wipe(); err != nil { + return err + } + return rlstore.Wipe() + }) } func (s *store) Status() ([][2]string, error) { - rlstore, err := s.LayerStore() + rlstore, err := s.getLayerStore() if err != nil { return nil, err } @@ -2721,37 +2501,24 @@ func (s *store) Version() ([][2]string, error) { } func (s *store) mount(id string, options drivers.MountOpts) (string, error) { - rlstore, err := s.LayerStore() - if err != nil { - return "", err - } - - s.graphLock.Lock() - defer s.graphLock.Unlock() - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver + // in startUsingGraphDriver(). + if err := s.startUsingGraphDriver(); err != nil { return "", err } + defer s.stopUsingGraphDriver() - modified, err := s.graphLock.Modified() + rlstore, err := s.getLayerStoreLocked() if err != nil { return "", err } - - /* We need to make sure the home mount is present when the Mount is done. */ - if modified { - s.graphDriver = nil - s.layerStore = nil - s.graphDriver, err = s.getGraphDriver() - if err != nil { - return "", err - } - s.lastLoaded = time.Now() + if err := rlstore.startWriting(); err != nil { + return "", err } + defer rlstore.stopWriting() if options.UidMaps != nil || options.GidMaps != nil { - options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps) + options.DisableShifting = !canUseShifting(rlstore, options.UidMaps, options.GidMaps) } if rlstore.Exists(id) { @@ -2790,8 +2557,10 @@ func (s *store) Mount(id, mountLabel string) (string, error) { options.GidMaps = container.GIDMap options.Options = container.MountOpts() if !s.disableVolatile { - if v, found := container.Flags["Volatile"]; found { - options.Volatile = v.(bool) + if v, found := container.Flags[volatileFlag]; found { + if b, ok := v.(bool); ok { + options.Volatile = b + } } } } @@ -2802,15 +2571,14 @@ func (s *store) Mounted(id string) (int, error) { if layerID, err := s.ContainerLayerID(id); err == nil { id = layerID } - rlstore, err := s.LayerStore() + rlstore, err := s.getLayerStore() if err != nil { return 0, err } - rlstore.RLock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + if err := rlstore.startReading(); err != nil { return 0, err } + defer rlstore.stopReading() return rlstore.Mounted(id) } @@ -2827,103 +2595,66 @@ func (s *store) Unmount(id string, force bool) (bool, error) { if layerID, err := s.ContainerLayerID(id); err == nil { id = layerID } - rlstore, err := s.LayerStore() - if err != nil { - return false, err - } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return false, err - } - if rlstore.Exists(id) { - return rlstore.Unmount(id, force) - } - return false, ErrLayerUnknown + var res bool + err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if rlstore.Exists(id) { + var err error + res, err = rlstore.unmount(id, force, false) + return err + } + return ErrLayerUnknown + }) + return res, err } func (s *store) Changes(from, to string) ([]archive.Change, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, 
err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res []archive.Change + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if store.Exists(to) { - return store.Changes(from, to) + var err error + res, err = store.Changes(from, to) + return true, err } + return false, nil + }); done { + return res, err } return nil, ErrLayerUnknown } func (s *store) DiffSize(from, to string) (int64, error) { - lstore, err := s.LayerStore() - if err != nil { - return -1, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return -1, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return -1, err - } + var res int64 = -1 + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if store.Exists(to) { - return store.DiffSize(from, to) + var err error + res, err = store.DiffSize(from, to) + return true, err } + return false, nil + }); done { + return res, err } return -1, ErrLayerUnknown } func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - // NaiveDiff could cause mounts to happen without a lock, so be safe // and treat the .Diff operation as a Mount. - s.graphLock.Lock() - defer s.graphLock.Unlock() - - modified, err := s.graphLock.Modified() - if err != nil { + // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver + // in startUsingGraphDriver(). + if err := s.startUsingGraphDriver(); err != nil { return nil, err } + defer s.stopUsingGraphDriver() - // We need to make sure the home mount is present when the Mount is done. - if modified { - s.graphDriver = nil - s.layerStore = nil - s.graphDriver, err = s.getGraphDriver() - if err != nil { - return nil, err - } - s.lastLoaded = time.Now() + layerStores, err := s.allLayerStoresLocked() + if err != nil { + return nil, err } - for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ + for _, s := range layerStores { store := s - store.RLock() - if err := store.ReloadIfChanged(); err != nil { - store.Unlock() + if err := store.startReading(); err != nil { return nil, err } if store.Exists(to) { @@ -2931,130 +2662,87 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro if rc != nil && err == nil { wrapped := ioutils.NewReadCloserWrapper(rc, func() error { err := rc.Close() - store.Unlock() + store.stopReading() return err }) return wrapped, nil } - store.Unlock() + store.stopReading() return rc, err } - store.Unlock() + store.stopReading() } return nil, ErrLayerUnknown } func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - if err = rlstore.Load(); err != nil { - return err + return s.writeToLayerStore(func(rlstore rwLayerStore) error { + if !rlstore.Exists(to) { + return ErrLayerUnknown } - } - if !rlstore.Exists(to) { - return ErrLayerUnknown - } - return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) + return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) + }) } func (s *store) CleanupStagingDirectory(stagingDirectory string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - if err = rlstore.Load(); err != nil { - return err - } - } - return rlstore.CleanupStagingDirectory(stagingDirectory) + return s.writeToLayerStore(func(rlstore rwLayerStore) error { + return rlstore.CleanupStagingDirectory(stagingDirectory) + }) } func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - if err = rlstore.Load(); err != nil { - return nil, err + var res *drivers.DriverWithDifferOutput + err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if to != "" && !rlstore.Exists(to) { + return ErrLayerUnknown } - } - if to != "" && !rlstore.Exists(to) { - return nil, ErrLayerUnknown - } - return rlstore.ApplyDiffWithDiffer(to, options, differ) + var err error + res, err = rlstore.ApplyDiffWithDiffer(to, options, differ) + return err + }) + return res, err } func (s *store) DifferTarget(id string) (string, error) { - rlstore, err := s.LayerStore() - if err != nil { - return "", err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - if err = rlstore.Load(); err != nil { - return "", err + var res string + err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if rlstore.Exists(id) { + var err error + res, err = rlstore.DifferTarget(id) + return err } - } - if rlstore.Exists(id) { - return rlstore.DifferTarget(id) - } - return "", ErrLayerUnknown + return ErrLayerUnknown + }) + return res, err } func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { - rlstore, err := s.LayerStore() - if err != nil { - return -1, err - } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - 
return -1, err - } - if rlstore.Exists(to) { - return rlstore.ApplyDiff(to, diff) - } - return -1, ErrLayerUnknown + var res int64 = -1 + err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if rlstore.Exists(to) { + var err error + res, err = rlstore.ApplyDiff(to, diff) + return err + } + return ErrLayerUnknown + }) + return res, err } -func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { +func (s *store) layersByMappedDigest(m func(roLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { var layers []Layer - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + if _, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { storeLayers, err := m(store, d) if err != nil { if !errors.Is(err, ErrLayerUnknown) { - return nil, err + return true, err } - continue + return false, nil } layers = append(layers, storeLayers...) + return false, nil + }); err != nil { + return nil, err } if len(layers) == 0 { return nil, ErrLayerUnknown @@ -3066,49 +2754,40 @@ func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { if err := d.Validate(); err != nil { return nil, fmt.Errorf("looking for compressed layers matching digest %q: %w", d, err) } - return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) + return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) } func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { if err := d.Validate(); err != nil { return nil, fmt.Errorf("looking for layers matching digest %q: %w", d, err) } - return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) + return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) } func (s *store) LayerSize(id string) (int64, error) { - lstore, err := s.LayerStore() - if err != nil { - return -1, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return -1, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return -1, err - } + var res int64 = -1 + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if store.Exists(id) { - return store.Size(id) + var err error + res, err = store.Size(id) + return true, err } + return false, nil + }); done { + return res, err } return -1, ErrLayerUnknown } func (s *store) LayerParentOwners(id string) ([]int, []int, error) { - rlstore, err := s.LayerStore() + rlstore, err := s.getLayerStore() if err != nil { return nil, nil, err } - rlstore.RLock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + if err := rlstore.startReading(); err != nil { return nil, nil, err } + defer rlstore.stopReading() if rlstore.Exists(id) { return rlstore.ParentOwners(id) } @@ -3116,25 +2795,19 @@ func (s *store) LayerParentOwners(id string) ([]int, []int, error) { } func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { - rlstore, err := s.LayerStore() + rlstore, err := s.getLayerStore() if err != nil { return nil, nil, err } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, nil, err - } - rlstore.RLock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + if err := rlstore.startReading(); err != nil { return nil, nil, err } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + defer rlstore.stopReading() + if err := s.containerStore.startReading(); err != nil { return nil, nil, err } - container, err := rcstore.Get(id) + defer s.containerStore.stopReading() + container, err := s.containerStore.Get(id) if err != nil { return nil, nil, err } @@ -3145,114 +2818,74 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { } func (s *store) Layers() ([]Layer, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - - layers, err := func() ([]Layer, error) { - lstore.Lock() - defer lstore.Unlock() - if err := lstore.Load(); err != nil { - return nil, err - } - return lstore.Layers() - }() - if err != nil { - return nil, err - } - - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - - for _, s := range lstores { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var layers []Layer + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { storeLayers, err := store.Layers() if err != nil { - return nil, err + return true, err } layers = append(layers, storeLayers...) + return false, nil + }); done { + return nil, err } return layers, nil } func (s *store) Images() ([]Image, error) { var images []Image - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { storeImages, err := store.Images() if err != nil { - return nil, err + return true, err } images = append(images, storeImages...) 
+ return false, nil + }); err != nil { + return nil, err } return images, nil } func (s *store) Containers() ([]Container, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } + defer s.containerStore.stopReading() - return rcstore.Containers() + return s.containerStore.Containers() } func (s *store) Layer(id string) (*Layer, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res *Layer + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { layer, err := store.Get(id) if err == nil { - return layer, nil + res = layer + return true, nil } + return false, nil + }); done { + return res, err } return nil, ErrLayerUnknown } func (s *store) LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) { - adriver, ok := s.graphDriver.(drivers.AdditionalLayerStoreDriver) - if !ok { - return nil, ErrLayerUnknown + var adriver drivers.AdditionalLayerStoreDriver + if err := func() error { // A scope for defer + if err := s.startUsingGraphDriver(); err != nil { + return err + } + defer s.stopUsingGraphDriver() + a, ok := s.graphDriver.(drivers.AdditionalLayerStoreDriver) + if !ok { + return ErrLayerUnknown + } + adriver = a + return nil + }(); err != nil { + return nil, err } al, err := adriver.LookupAdditionalLayer(d, imageref) @@ -3289,29 +2922,23 @@ func (al *additionalLayer) CompressedSize() int64 { } func (al *additionalLayer) PutAs(id, parent string, names []string) (*Layer, error) { - rlstore, err := al.s.LayerStore() + rlstore, rlstores, err := al.s.bothLayerStoreKinds() if err != nil { return nil, err } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return nil, err - } - rlstores, err := al.s.ROLayerStores() - if err != nil { + if err := rlstore.startWriting(); err != nil { return nil, err } + defer rlstore.stopWriting() var parentLayer *Layer if parent != "" { - for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) { + for _, lstore := range append([]roLayerStore{rlstore}, rlstores...) { if lstore != rlstore { - lstore.RLock() - defer lstore.Unlock() - if err := lstore.ReloadIfChanged(); err != nil { + if err := lstore.startReading(); err != nil { return nil, err } + defer lstore.stopReading() } parentLayer, err = lstore.Get(parent) if err == nil { @@ -3331,117 +2958,75 @@ func (al *additionalLayer) Release() { } func (s *store) Image(id string) (*Image, error) { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) 
{ - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res *Image + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { image, err := store.Get(id) if err == nil { - return image, nil + res = image + return true, nil } + return false, nil + }); done { + return res, err } return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { - images := []*Image{} layer, err := s.Layer(id) if err != nil { return nil, err } - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + images := []*Image{} + if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { imageList, err := store.Images() if err != nil { - return nil, err + return true, err } for _, image := range imageList { + image := image if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) { images = append(images, &image) } } + return false, nil + }); err != nil { + return nil, err } return images, nil } func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { images := []*Image{} - - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { imageList, err := store.ByDigest(d) if err != nil && !errors.Is(err, ErrImageUnknown) { - return nil, err + return true, err } images = append(images, imageList...) 
+ return false, nil + }); err != nil { + return nil, err } return images, nil } func (s *store) Container(id string) (*Container, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } + defer s.containerStore.stopReading() - return rcstore.Get(id) + return s.containerStore.Get(id) } func (s *store) ContainerLayerID(id string) (string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return "", err } - container, err := rcstore.Get(id) + defer s.containerStore.stopReading() + container, err := s.containerStore.Get(id) if err != nil { return "", err } @@ -3453,16 +3038,11 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { if err != nil { return nil, err } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } - containerList, err := rcstore.Containers() + defer s.containerStore.stopReading() + containerList, err := s.containerStore.Containers() if err != nil { return nil, err } @@ -3476,17 +3056,12 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { } func (s *store) ContainerDirectory(id string) (string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return "", err } + defer s.containerStore.stopReading() - id, err = rcstore.Lookup(id) + id, err := s.containerStore.Lookup(id) if err != nil { return "", err } @@ -3500,18 +3075,12 @@ func (s *store) ContainerDirectory(id string) (string, error) { } func (s *store) ContainerRunDirectory(id string) (string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return "", err } + defer s.containerStore.stopReading() - id, err = rcstore.Lookup(id) + id, err := s.containerStore.Lookup(id) if err != nil { return "", err } @@ -3541,7 +3110,7 @@ func (s *store) FromContainerDirectory(id, file string) ([]byte, error) { if err != nil { return nil, err } - return ioutil.ReadFile(filepath.Join(dir, file)) + return os.ReadFile(filepath.Join(dir, file)) } func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error { @@ -3561,26 +3130,25 @@ func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) { if err != nil { return nil, err } - return ioutil.ReadFile(filepath.Join(dir, file)) + return os.ReadFile(filepath.Join(dir, file)) } func (s *store) Shutdown(force bool) ([]string, error) { mounted := []string{} - modified := false - rlstore, err := s.LayerStore() - if err != nil { + if err := s.startUsingGraphDriver(); err != nil { return mounted, err } + defer s.stopUsingGraphDriver() - s.graphLock.Lock() - defer s.graphLock.Unlock() - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + rlstore, err 
:= s.getLayerStoreLocked()
+	if err != nil {
+		return mounted, err
+	}
+	if err := rlstore.startWriting(); err != nil {
 		return nil, err
 	}
+	defer rlstore.stopWriting()
 
 	layers, err := rlstore.Layers()
 	if err != nil {
@@ -3592,15 +3160,17 @@
 		}
 		mounted = append(mounted, layer.ID)
 		if force {
-			for layer.MountCount > 0 {
-				_, err2 := rlstore.Unmount(layer.ID, force)
+			for {
+				_, err2 := rlstore.unmount(layer.ID, force, true)
+				if err2 == ErrLayerNotMounted {
+					break
+				}
 				if err2 != nil {
 					if err == nil {
 						err = err2
 					}
 					break
 				}
-				modified = true
 			}
 		}
 	}
@@ -3609,11 +3179,16 @@
 	}
 	if err == nil {
 		err = s.graphDriver.Cleanup()
-		s.graphLock.Touch()
-		modified = true
-	}
-	if modified {
-		rlstore.Touch()
+		// We don’t retain the lastWrite value, and treat this update as if someone else did the .Cleanup(),
+		// so that we reload after a .Shutdown() the same way other processes would.
+		// Shutdown() is basically an error path, so reliability is more important than performance.
+		if _, err2 := s.graphLock.RecordWrite(); err2 != nil {
+			if err == nil {
+				err = err2
+			} else {
+				err = fmt.Errorf("(graphLock.RecordWrite failed: %v) %w", err2, err)
+			}
+		}
 	}
 	return mounted, err
 }
@@ -3701,9 +3276,10 @@ const AutoUserNsMaxSize = 65536
 // creating a user namespace.
 const RootAutoUserNsUser = "containers"
 
-// SetDefaultConfigFilePath sets the default configuration to the specified path
+// SetDefaultConfigFilePath sets the default configuration to the specified path, and loads the file.
+// Deprecated: Use types.SetDefaultConfigFilePath, which can return an error.
 func SetDefaultConfigFilePath(path string) {
-	types.SetDefaultConfigFilePath(path)
+	_ = types.SetDefaultConfigFilePath(path)
 }
 
 // DefaultConfigFile returns the path to the storage config file used
@@ -3713,8 +3289,9 @@ func DefaultConfigFile(rootless bool) (string, error) {
 
 // ReloadConfigurationFile parses the specified configuration file and overrides
 // the configuration in storeOptions.
+// Deprecated: Use types.ReloadConfigurationFile, which can return an error.
 func ReloadConfigurationFile(configFile string, storeOptions *types.StoreOptions) {
-	types.ReloadConfigurationFile(configFile, storeOptions)
+	_ = types.ReloadConfigurationFile(configFile, storeOptions)
 }
 
 // GetDefaultMountOptions returns the default mountoptions defined in container/storage
@@ -3756,3 +3333,20 @@ func (s *store) Free() {
 		}
 	}
 }
+
+// GarbageCollect tries to clean up old unreferenced container leftovers. It returns the first
+// error encountered, but continues as far as it can.
+func (s *store) GarbageCollect() error {
+	firstErr := s.writeToContainerStore(func() error {
+		return s.containerStore.GarbageCollect()
+	})
+
+	moreErr := s.writeToLayerStore(func(rlstore rwLayerStore) error {
+		return rlstore.GarbageCollect()
+	})
+	if firstErr == nil {
+		firstErr = moreErr
+	}
+
+	return firstErr
+}
diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go
index ad12ffdbf2d..dc6ee3e0c94 100644
--- a/vendor/github.com/containers/storage/types/errors.go
+++ b/vendor/github.com/containers/storage/types/errors.go
@@ -57,4 +57,6 @@ var (
 	ErrNotSupported = errors.New("not supported")
 	// ErrInvalidMappings is returned when the specified mappings are invalid.
 	ErrInvalidMappings = errors.New("invalid mappings specified")
+	// ErrNoAvailableIDs is returned when there are not enough unused IDs within the user namespace.
+ ErrNoAvailableIDs = errors.New("not enough unused IDs in user namespace") ) diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index 5421c02dae6..01f4e5a7972 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -19,9 +19,11 @@ import ( type TomlConfig struct { Storage struct { Driver string `toml:"driver,omitempty"` + DriverPriority []string `toml:"driver_priority,omitempty"` RunRoot string `toml:"runroot,omitempty"` GraphRoot string `toml:"graphroot,omitempty"` RootlessStoragePath string `toml:"rootless_storage_path,omitempty"` + TransientStore bool `toml:"transient_store,omitempty"` Options cfg.OptionsConfig `toml:"options,omitempty"` } `toml:"storage"` } @@ -35,20 +37,55 @@ const ( var ( defaultStoreOptionsOnce sync.Once loadDefaultStoreOptionsErr error + once sync.Once + storeOptions StoreOptions + storeError error + defaultConfigFileSet bool + // defaultConfigFile path to the system wide storage.conf file + defaultConfigFile = SystemConfigFile + // DefaultStoreOptions is a reasonable default set of options. + defaultStoreOptions StoreOptions ) func loadDefaultStoreOptions() { - defaultStoreOptions.RunRoot = defaultRunRoot - defaultStoreOptions.GraphRoot = defaultGraphRoot defaultStoreOptions.GraphDriverName = "" + setDefaults := func() { + // reload could set values to empty for run and graph root if config does not contain anything + if defaultStoreOptions.RunRoot == "" { + defaultStoreOptions.RunRoot = defaultRunRoot + } + if defaultStoreOptions.GraphRoot == "" { + defaultStoreOptions.GraphRoot = defaultGraphRoot + } + } + setDefaults() + if path, ok := os.LookupEnv(storageConfEnv); ok { defaultOverrideConfigFile = path if err := ReloadConfigurationFileIfNeeded(path, &defaultStoreOptions); err != nil { loadDefaultStoreOptionsErr = err return } - } else if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + setDefaults() + return + } + + if path, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok { + homeConfigFile := filepath.Join(path, "containers", "storage.conf") + if _, err := os.Stat(homeConfigFile); err == nil { + // use the per-user storage.conf in XDG_CONFIG_HOME if it exists + defaultOverrideConfigFile = homeConfigFile + } else { + if !os.IsNotExist(err) { + loadDefaultStoreOptionsErr = err + return + } + } + } + + _, err := os.Stat(defaultOverrideConfigFile) + if err == nil { // The DefaultConfigFile(rootless) function returns the path // of the used storage.conf file, by returning defaultConfigFile // If override exists containers/storage uses it by default.
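Reviewer context: the hunks above change how containers/storage resolves its configuration file. The new precedence is: the storageConfEnv environment variable, then a per-user storage.conf under XDG_CONFIG_HOME, then the system override file, and finally the built-in default. A minimal, self-contained sketch of that lookup order follows; the CONTAINERS_STORAGE_CONF name (the upstream value of storageConfEnv) and the two example paths are assumptions for illustration, not this package's internals:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveStorageConf sketches the precedence implemented above: an explicit
// env var first, then the user's XDG config, then the override, then the
// system-wide default.
func resolveStorageConf(overrideFile, systemFile string) string {
	if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
		return path // an explicit setting always wins
	}
	if xdg, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok {
		userFile := filepath.Join(xdg, "containers", "storage.conf")
		if _, err := os.Stat(userFile); err == nil {
			return userFile // per-user config, if present
		}
	}
	if _, err := os.Stat(overrideFile); err == nil {
		return overrideFile // e.g. /etc/containers/storage.conf
	}
	return systemFile // e.g. /usr/share/containers/storage.conf
}

func main() {
	fmt.Println(resolveStorageConf("/etc/containers/storage.conf", "/usr/share/containers/storage.conf"))
}
```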
@@ -57,22 +94,18 @@ func loadDefaultStoreOptions() { loadDefaultStoreOptionsErr = err return } - } else { - if !os.IsNotExist(err) { - logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err) - } - if err := ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions); err != nil && !errors.Is(err, os.ErrNotExist) { - loadDefaultStoreOptionsErr = err - return - } + setDefaults() + return } - // reload could set values to empty for run and graph root if config does not contains anything - if defaultStoreOptions.RunRoot == "" { - defaultStoreOptions.RunRoot = defaultRunRoot + + if !os.IsNotExist(err) { + logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err) } - if defaultStoreOptions.GraphRoot == "" { - defaultStoreOptions.GraphRoot = defaultGraphRoot + if err := ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions); err != nil && !errors.Is(err, os.ErrNotExist) { + loadDefaultStoreOptionsErr = err + return } + setDefaults() } // defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing. @@ -144,8 +177,8 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str return storageOpts, nil } -// DefaultStoreOptions returns the default storage ops for containers -func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { +// loadStoreOptions returns the default storage options for containers +func loadStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0) if err != nil { return defaultStoreOptions, err @@ -153,6 +186,21 @@ func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf) } +// UpdateStoreOptions should be called only if the container engine received a SIGHUP; +// otherwise use DefaultStoreOptions +func UpdateStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { + storeOptions, storeError = loadStoreOptions(rootless, rootlessUID) + return storeOptions, storeError +} + +// DefaultStoreOptions returns the default storage options for containers +func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { + once.Do(func() { + storeOptions, storeError = loadStoreOptions(rootless, rootlessUID) + }) + return storeOptions, storeError +} + // StoreOptions is used for passing initialization options to GetStore(), for // initializing a Store object and the underlying storage that it controls. type StoreOptions struct { @@ -166,10 +214,16 @@ type StoreOptions struct { // RootlessStoragePath is the storage path for rootless users // default $HOME/.local/share/containers/storage RootlessStoragePath string `toml:"rootless_storage_path"` - // GraphDriverName is the underlying storage driver that we'll be - // using. It only needs to be specified the first time a Store is - // initialized for a given RunRoot and GraphRoot. + // If the driver is not specified, the best-suited driver will be picked + // either from GraphDriverPriority, if specified, or from the platform-dependent + // priority list (in that order). GraphDriverName string `json:"driver,omitempty"` + // GraphDriverPriority is a list of storage drivers that will be tried + // to initialize the Store for a given RunRoot and GraphRoot unless a + // GraphDriverName is set. + // This list can be used to define a custom order in which the drivers + // will be tried.
+ GraphDriverPriority []string `json:"driver-priority,omitempty"` // GraphDriverOptions are driver-specific options. GraphDriverOptions []string `json:"driver-options,omitempty"` // UIDMap and GIDMap are used for setting up a container's root filesystem @@ -188,6 +242,8 @@ type StoreOptions struct { PullOptions map[string]string `toml:"pull_options"` // DisableVolatile doesn't allow volatile mounts when it is set. DisableVolatile bool `json:"disable-volatile,omitempty"` + // If transient, don't persist containers over boot (stores db in runroot) + TransientStore bool `json:"transient_store,omitempty"` } // isRootlessDriver returns true if the given storage driver is valid for containers running as non root @@ -244,7 +300,11 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti } } if opts.GraphDriverName == "" { - opts.GraphDriverName = "vfs" + if len(systemOpts.GraphDriverPriority) == 0 { + opts.GraphDriverName = "vfs" + } else { + opts.GraphDriverPriority = systemOpts.GraphDriverPriority + } } if os.Getenv("STORAGE_OPTS") != "" { @@ -313,7 +373,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro } } else { if !os.IsNotExist(err) { - fmt.Printf("Failed to read %s %v\n", configFile, err.Error()) + logrus.Warningf("Failed to read %s %v\n", configFile, err.Error()) return err } } @@ -331,8 +391,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") storeOptions.GraphDriverName = overlayDriver } - if storeOptions.GraphDriverName == "" { - logrus.Errorf("The storage 'driver' option must be set in %s to guarantee proper operation", configFile) + storeOptions.GraphDriverPriority = config.Storage.DriverPriority + if storeOptions.GraphDriverName == "" && len(storeOptions.GraphDriverPriority) == 0 { + logrus.Warnf("The storage 'driver' option should be set in %s. A driver was picked automatically.", configFile) } if config.Storage.RunRoot != "" { storeOptions.RunRoot = config.Storage.RunRoot @@ -376,7 +437,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" { mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup) if err != nil { - fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) + logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) return err } storeOptions.UIDMap = mappings.UIDs() @@ -406,6 +467,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro } storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile + storeOptions.TransientStore = config.Storage.TransientStore storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...) 
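The new UpdateStoreOptions/DefaultStoreOptions split above is a load-once cache with an explicit reload hook for SIGHUP. A hedged sketch of the same pattern in isolation; Options and loadOptions are placeholder stand-ins rather than this package's real types, and (as in the vendored code) concurrent Update and Default are not synchronized beyond the initial load:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

type Options struct{ GraphRoot string }

var (
	loadOnce sync.Once
	cached   Options
	loadErr  error
)

// loadOptions is a hypothetical stand-in for re-reading storage.conf.
func loadOptions() (Options, error) {
	return Options{GraphRoot: "/var/lib/containers/storage"}, nil
}

// Default computes the options once and returns the cached copy afterwards,
// mirroring DefaultStoreOptions' sync.Once.
func Default() (Options, error) {
	loadOnce.Do(func() { cached, loadErr = loadOptions() })
	return cached, loadErr
}

// Update forces a re-read, mirroring UpdateStoreOptions; per its doc comment
// it should only be called when the engine receives a SIGHUP.
func Update() (Options, error) {
	cached, loadErr = loadOptions()
	return cached, loadErr
}

func main() {
	hup := make(chan os.Signal, 1)
	signal.Notify(hup, syscall.SIGHUP)
	go func() {
		for range hup {
			Update() // reload configuration on SIGHUP
		}
	}()
	opts, _ := Default()
	fmt.Println(opts.GraphRoot)
}
```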
diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go index d5ad50bc0bd..eed1a3d94b8 100644 --- a/vendor/github.com/containers/storage/types/options_darwin.go +++ b/vendor/github.com/containers/storage/types/options_darwin.go @@ -5,13 +5,9 @@ const ( // for rootless path is constructed via getRootlessStorageOpts defaultRunRoot string = "/run/containers/storage" defaultGraphRoot string = "/var/lib/containers/storage" + SystemConfigFile = "/usr/share/containers/storage.conf" ) -// defaultConfigFile path to the system wide storage.conf file var ( - defaultConfigFile = "/usr/share/containers/storage.conf" defaultOverrideConfigFile = "/etc/containers/storage.conf" - defaultConfigFileSet = false - // DefaultStoreOptions is a reasonable default set of options. - defaultStoreOptions StoreOptions ) diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_freebsd.go index d5976b6d581..afb7ec6b4aa 100644 --- a/vendor/github.com/containers/storage/types/options_freebsd.go +++ b/vendor/github.com/containers/storage/types/options_freebsd.go @@ -5,13 +5,10 @@ const ( // for rootless path is constructed via getRootlessStorageOpts defaultRunRoot string = "/var/run/containers/storage" defaultGraphRoot string = "/var/db/containers/storage" + SystemConfigFile = "/usr/local/share/containers/storage.conf" ) // defaultConfigFile path to the system wide storage.conf file var ( - defaultConfigFile = "/usr/local/share/containers/storage.conf" defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf" - defaultConfigFileSet = false - // DefaultStoreOptions is a reasonable default set of options. - defaultStoreOptions StoreOptions ) diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go index d5ad50bc0bd..d44aaf76a30 100644 --- a/vendor/github.com/containers/storage/types/options_linux.go +++ b/vendor/github.com/containers/storage/types/options_linux.go @@ -5,13 +5,10 @@ const ( // for rootless path is constructed via getRootlessStorageOpts defaultRunRoot string = "/run/containers/storage" defaultGraphRoot string = "/var/lib/containers/storage" + SystemConfigFile = "/usr/share/containers/storage.conf" ) // defaultConfigFile path to the system wide storage.conf file var ( - defaultConfigFile = "/usr/share/containers/storage.conf" defaultOverrideConfigFile = "/etc/containers/storage.conf" - defaultConfigFileSet = false - // DefaultStoreOptions is a reasonable default set of options. - defaultStoreOptions StoreOptions ) diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go index d5ad50bc0bd..d44aaf76a30 100644 --- a/vendor/github.com/containers/storage/types/options_windows.go +++ b/vendor/github.com/containers/storage/types/options_windows.go @@ -5,13 +5,10 @@ const ( // for rootless path is constructed via getRootlessStorageOpts defaultRunRoot string = "/run/containers/storage" defaultGraphRoot string = "/var/lib/containers/storage" + SystemConfigFile = "/usr/share/containers/storage.conf" ) // defaultConfigFile path to the system wide storage.conf file var ( - defaultConfigFile = "/usr/share/containers/storage.conf" defaultOverrideConfigFile = "/etc/containers/storage.conf" - defaultConfigFileSet = false - // DefaultStoreOptions is a reasonable default set of options. 
- defaultStoreOptions StoreOptions ) diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go index 88641d42416..72c38f861d3 100644 --- a/vendor/github.com/containers/storage/types/utils.go +++ b/vendor/github.com/containers/storage/types/utils.go @@ -3,7 +3,6 @@ package types import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -75,7 +74,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e return runtimeDir, nil } - initCommand, err := ioutil.ReadFile(env.getProcCommandFile()) + initCommand, err := os.ReadFile(env.getProcCommandFile()) if err != nil || string(initCommand) == "systemd" { runUserDir := env.getRunUserDir() if isRootlessRuntimeDirOwner(runUserDir, env) { @@ -174,6 +173,9 @@ func DefaultConfigFile(rootless bool) (string, error) { return path, nil } if !rootless { + if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + return defaultOverrideConfigFile, nil + } return defaultConfigFile, nil } @@ -194,7 +196,7 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio fi, err := os.Stat(configFile) if err != nil { if !os.IsNotExist(err) { - fmt.Printf("Failed to read %s %v\n", configFile, err.Error()) + logrus.Warningf("Failed to read %s %v\n", configFile, err.Error()) } return } diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go index e0e530275a1..9c27b5d3990 100644 --- a/vendor/github.com/containers/storage/userns.go +++ b/vendor/github.com/containers/storage/userns.go @@ -78,6 +78,10 @@ func (s *store) getAvailableIDs() (*idSet, *idSet, error) { return u, g, nil } +// nobodyUser is the UID and GID of the "nobody" user. Its value is hardcoded +// for simplicity. +const nobodyUser = 65534 + // parseMountedFiles returns the maximum UID and GID found in the /etc/passwd and // /etc/group files. func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { @@ -98,10 +102,10 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { if u.Name == "nobody" { continue } - if u.Uid > size { + if u.Uid > size && u.Uid != nobodyUser { size = u.Uid } - if u.Gid > size { + if u.Gid > size && u.Gid != nobodyUser { size = u.Gid } } @@ -113,7 +117,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { if g.Name == "nobody" { continue } - if g.Gid > size { + if g.Gid > size && g.Gid != nobodyUser { size = g.Gid } } @@ -123,16 +127,9 @@ } // getMaxSizeFromImage returns the maximum ID used by the specified image. -// The layer stores must be already locked. -func (s *store) getMaxSizeFromImage(image *Image, passwdFile, groupFile string) (uint32, error) { - lstore, err := s.LayerStore() - if err != nil { - return 0, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return 0, err - } +// On entry, rlstore must be locked for writing, and lstores must be locked for reading. +func (s *store) getMaxSizeFromImage(image *Image, rlstore rwLayerStore, lstores []roLayerStore, passwdFile, groupFile string) (_ uint32, retErr error) { + layerStores := append([]roLayerStore{rlstore}, lstores...) size := uint32(0) @@ -140,7 +137,7 @@ func (s *store) getMaxSizeFromImage(image *Image, passwdFile, groupFile string) layerName := image.TopLayer outer: for { - for _, ls := range append([]ROLayerStore{lstore}, lstores...)
{ + for _, ls := range layerStores { layer, err := ls.Get(layerName) if err != nil { continue @@ -167,11 +164,6 @@ outer: return 0, fmt.Errorf("cannot find layer %q", layerName) } - rlstore, err := s.LayerStore() - if err != nil { - return 0, err - } - layerOptions := &LayerOptions{ IDMappingOptions: types.IDMappingOptions{ HostUIDMapping: true, @@ -187,7 +179,15 @@ outer: if err != nil { return 0, err } - defer rlstore.Delete(clayer.ID) + defer func() { + if err2 := rlstore.Delete(clayer.ID); err2 != nil { + if retErr == nil { + retErr = fmt.Errorf("deleting temporary layer %#v: %w", clayer.ID, err2) + } else { + logrus.Errorf("Error deleting temporary layer %#v: %v", clayer.ID, err2) + } + } + }() mountOptions := drivers.MountOpts{ MountLabel: "", @@ -200,7 +200,15 @@ outer: if err != nil { return 0, err } - defer rlstore.Unmount(clayer.ID, true) + defer func() { + if _, err2 := rlstore.unmount(clayer.ID, true, false); err2 != nil { + if retErr == nil { + retErr = fmt.Errorf("unmounting temporary layer %#v: %w", clayer.ID, err2) + } else { + logrus.Errorf("Error unmounting temporary layer %#v: %v", clayer.ID, err2) + } + } + }() userFilesSize := parseMountedFiles(mountpoint, passwdFile, groupFile) if userFilesSize > size { @@ -211,7 +219,8 @@ } // getAutoUserNS creates an automatic user namespace +// If image != nil, then on entry rlstore must be locked for writing, and lstores must be locked for reading. +func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image, rlstore rwLayerStore, lstores []roLayerStore) ([]idtools.IDMap, []idtools.IDMap, error) { requestedSize := uint32(0) initialSize := uint32(1) if options.Size > 0 { @@ -226,7 +235,7 @@ return nil, nil, fmt.Errorf("cannot read mappings: %w", err) } - // Look every container that is using a user namespace and store + // Look at every container that is using a user namespace and store // the intervals that are already used.
containers, err := s.Containers() if err != nil { @@ -250,7 +259,7 @@ func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image) ([ size = s.autoNsMinSize } if image != nil { - sizeFromImage, err := s.getMaxSizeFromImage(image, options.PasswdFile, options.GroupFile) + sizeFromImage, err := s.getMaxSizeFromImage(image, rlstore, lstores, options.PasswdFile, options.GroupFile) if err != nil { return nil, nil, err } @@ -259,7 +268,7 @@ func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image) ([ } } if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize { - return nil, nil, fmt.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize) + return nil, nil, fmt.Errorf("the container needs a user namespace with size %v that is bigger than the maximum value allowed with userns=auto %v", size, s.autoNsMaxSize) } } diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 37d4b79b01b..7f9e92b930a 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "unicode" "github.com/containers/storage/types" ) @@ -16,12 +17,12 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) { return types.GetRootlessRuntimeDir(rootlessUID) } -// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers +// DefaultStoreOptionsAutoDetectUID returns the default storage options for containers func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) { return types.DefaultStoreOptionsAutoDetectUID() } -// DefaultStoreOptions returns the default storage ops for containers +// DefaultStoreOptions returns the default storage options for containers func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) { return types.DefaultStoreOptions(rootless, rootlessUID) } @@ -72,3 +73,15 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO } return dedupeNames(result), nil } + +func nameLooksLikeID(name string) bool { + if len(name) != 64 { + return false + } + for _, c := range name { + if !unicode.Is(unicode.ASCII_Hex_Digit, c) { + return false + } + } + return true +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go index cff5af1a64c..147f756fe24 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go @@ -176,6 +176,11 @@ func (c *Conn) Close() { c.sigconn.Close() } +// Connected returns whether conn is connected +func (c *Conn) Connected() bool { + return c.sysconn.Connected() && c.sigconn.Connected() +} + // NewConnection establishes a connection to a bus using a caller-supplied function. // This allows connecting to remote buses through a user-supplied mechanism. // The supplied function may be called multiple times, and should return independent connections. 
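The new Conn.Connected accessor on go-systemd's dbus.Conn lets callers notice a dropped bus connection and re-dial instead of issuing calls that are bound to fail. A usage sketch under the assumption that dbus.NewWithContext (the standard v22 constructor) is how the connection was obtained:

```go
package main

import (
	"context"
	"log"

	"github.com/coreos/go-systemd/v22/dbus"
)

// ensureConn returns conn if it is still healthy; otherwise it closes the
// stale connection and dials a fresh one to the system bus.
func ensureConn(ctx context.Context, conn *dbus.Conn) (*dbus.Conn, error) {
	if conn != nil && conn.Connected() {
		return conn, nil
	}
	if conn != nil {
		conn.Close()
	}
	return dbus.NewWithContext(ctx)
}

func main() {
	ctx := context.Background()
	conn, err := ensureConn(ctx, nil)
	if err != nil {
		log.Fatalf("connecting to systemd over D-Bus: %v", err)
	}
	defer conn.Close()
	log.Println("connected:", conn.Connected())
}
```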
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go index fa04afc708e..074148cb4d6 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -417,6 +417,29 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { return status, nil } +// GetUnitByPID returns the unit object path of the unit a process ID +// belongs to. It takes a UNIX PID and returns the object path. The PID must +// refer to an existing system process. +func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) { + var result dbus.ObjectPath + + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result) + + return result, err +} + +// GetUnitNameByPID returns the name of the unit a process ID belongs to. It +// takes a UNIX PID and returns the unit name. The PID must refer to an +// existing system process. +func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) { + path, err := c.GetUnitByPID(ctx, pid) + if err != nil { + return "", err + } + + return unitName(path), nil +} + // Deprecated: use ListUnitsContext instead. func (c *Conn) ListUnits() ([]UnitStatus, error) { return c.ListUnitsContext(context.Background()) @@ -828,3 +851,14 @@ func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { return status, nil } + +// FreezeUnit freezes the cgroup associated with the unit. +// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2. +func (c *Conn) FreezeUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store() +} + +// ThawUnit unfreezes the cgroup associated with the unit. +func (c *Conn) ThawUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store() +} diff --git a/vendor/github.com/coreos/go-systemd/v22/internal/dlopen/dlopen_example.go b/vendor/github.com/coreos/go-systemd/v22/internal/dlopen/dlopen_example.go new file mode 100644 index 00000000000..2065c5effa6 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/internal/dlopen/dlopen_example.go @@ -0,0 +1,57 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+// +//go:build linux +// +build linux + +package dlopen + +// #include <string.h> +// #include <stdlib.h> +// +// int +// my_strlen(void *f, const char *s) +// { +// size_t (*strlen)(const char *); +// +// strlen = (size_t (*)(const char *))f; +// return strlen(s); +// } +import "C" + +import ( + "fmt" + "unsafe" +) + +func strlen(libs []string, s string) (int, error) { + h, err := GetHandle(libs) + if err != nil { + return -1, fmt.Errorf(`couldn't get a handle to the library: %v`, err) + } + defer h.Close() + + f := "strlen" + cs := C.CString(s) + defer C.free(unsafe.Pointer(cs)) + + strlen, err := h.GetSymbolPointer(f) + if err != nil { + return -1, fmt.Errorf(`couldn't get symbol %q: %v`, f, err) + } + + len := C.my_strlen(strlen, cs) + + return int(len), nil +} diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go index 8d58ca0fbca..c5b23a81968 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows // Package journal provides write bindings to the local systemd journal. @@ -53,15 +54,9 @@ var ( onceConn sync.Once ) -func init() { - onceConn.Do(initConn) -} - // Enabled checks whether the local systemd journal is available for logging. func Enabled() bool { - onceConn.Do(initConn) - - if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + if c := getOrInitConn(); c == nil { return false } @@ -74,6 +69,58 @@ func Enabled() bool { return true } +// StderrIsJournalStream returns whether the process stderr is connected +// to the Journal's stream transport. +// +// This can be used for automatic protocol upgrading described in [Journal Native Protocol]. +// +// Returns true if the JOURNAL_STREAM environment variable is present, +// and stderr's device and inode numbers match it. +// +// An error is returned if an unexpected error occurs: e.g. if the JOURNAL_STREAM environment variable +// is present but malformed, the fstat syscall fails, etc. +// +// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading +func StderrIsJournalStream() (bool, error) { + return fdIsJournalStream(syscall.Stderr) +} + +// StdoutIsJournalStream returns whether the process stdout is connected +// to the Journal's stream transport. +// +// Returns true if the JOURNAL_STREAM environment variable is present, +// and stdout's device and inode numbers match it. +// +// An error is returned if an unexpected error occurs: e.g. if the JOURNAL_STREAM environment variable +// is present but malformed, the fstat syscall fails, etc. +// +// Most users should probably use [StderrIsJournalStream].
+func StdoutIsJournalStream() (bool, error) { + return fdIsJournalStream(syscall.Stdout) +} + +func fdIsJournalStream(fd int) (bool, error) { + journalStream := os.Getenv("JOURNAL_STREAM") + if journalStream == "" { + return false, nil + } + + var expectedStat syscall.Stat_t + _, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino) + if err != nil { + return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err) + } + + var stat syscall.Stat_t + err = syscall.Fstat(fd, &stat) + if err != nil { + return false, err + } + + match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino + return match, nil +} + // Send a message to the local systemd journal. vars is a map of journald // fields to values. Fields must be composed of uppercase letters, numbers, // and underscores, but must not start with an underscore. Within these @@ -82,7 +129,7 @@ func Enabled() bool { // (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) // for more details. vars may be nil. func Send(message string, priority Priority, vars map[string]string) error { - conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + conn := getOrInitConn() if conn == nil { return errors.New("could not initialize socket to journald") } @@ -126,6 +173,16 @@ func Send(message string, priority Priority, vars map[string]string) error { return nil } +// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary +func getOrInitConn() *net.UnixConn { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn != nil { + return conn + } + onceConn.Do(initConn) + return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) +} + func appendVariable(w io.Writer, name, value string) { if err := validVarName(name); err != nil { fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) @@ -194,7 +251,7 @@ func tempFd() (*os.File, error) { } // initConn initializes the global `unixConnPtr` socket. -// It is meant to be called exactly once, at program startup. +// It is automatically called when needed. func initConn() { autobind, err := net.ResolveUnixAddr("unixgram", "") if err != nil { diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go index 677aca68ed2..322e41e74c3 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go @@ -33,3 +33,11 @@ func Enabled() bool { func Send(message string, priority Priority, vars map[string]string) error { return errors.New("could not initialize socket to journald") } + +func StderrIsJournalStream() (bool, error) { + return false, nil +} + +func StdoutIsJournalStream() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/cyberphone/json-canonicalization/LICENSE b/vendor/github.com/cyberphone/json-canonicalization/LICENSE new file mode 100644 index 00000000000..591211595aa --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/LICENSE @@ -0,0 +1,13 @@ + Copyright 2018 Anders Rundgren + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go new file mode 100644 index 00000000000..92574a3f4f3 --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go @@ -0,0 +1,71 @@ +// +// Copyright 2006-2019 WebPKI.org (http://webpki.org). +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// This package converts numbers in IEEE-754 double precision into the +// format specified for JSON in EcmaScript Version 6 and forward. +// The core application for this is canonicalization: +// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02 + +package jsoncanonicalizer + +import ( + "errors" + "math" + "strconv" + "strings" +) + +const invalidPattern uint64 = 0x7ff0000000000000 + +func NumberToJSON(ieeeF64 float64) (res string, err error) { + ieeeU64 := math.Float64bits(ieeeF64) + + // Special case: NaN and Infinity are invalid in JSON + if (ieeeU64 & invalidPattern) == invalidPattern { + return "null", errors.New("Invalid JSON number: " + strconv.FormatUint(ieeeU64, 16)) + } + + // Special case: eliminate "-0" as mandated by the ES6-JSON/JCS specifications + if ieeeF64 == 0 { // Right, this line takes both -0 and 0 + return "0", nil + } + + // Deal with the sign separately + var sign string = "" + if ieeeF64 < 0 { + ieeeF64 =-ieeeF64 + sign = "-" + } + + // ES6 has a unique "g" format + var format byte = 'e' + if ieeeF64 < 1e+21 && ieeeF64 >= 1e-6 { + format = 'f' + } + + // The following should do the trick: + es6Formatted := strconv.FormatFloat(ieeeF64, format, -1, 64) + + // Minor cleanup + exponent := strings.IndexByte(es6Formatted, 'e') + if exponent > 0 { + // Go outputs "1e+09" which must be rewritten as "1e+9" + if es6Formatted[exponent + 2] == '0' { + es6Formatted = es6Formatted[:exponent + 2] + es6Formatted[exponent + 3:] + } + } + return sign + es6Formatted, nil +} diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go new file mode 100644 index 00000000000..661f41055e4 --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go @@ -0,0 +1,378 @@ +// +// Copyright 2006-2019 WebPKI.org (http://webpki.org). 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// This package transforms JSON data in UTF-8 according to: +// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02 + +package jsoncanonicalizer + +import ( + "errors" + "container/list" + "fmt" + "strconv" + "strings" + "unicode/utf16" +) + +type nameValueType struct { + name string + sortKey []uint16 + value string +} + +// JSON standard escapes (modulo \u) +var asciiEscapes = []byte{'\\', '"', 'b', 'f', 'n', 'r', 't'} +var binaryEscapes = []byte{'\\', '"', '\b', '\f', '\n', '\r', '\t'} + +// JSON literals +var literals = []string{"true", "false", "null"} + +func Transform(jsonData []byte) (result []byte, e error) { + + // JSON data MUST be UTF-8 encoded + var jsonDataLength int = len(jsonData) + + // Current pointer in jsonData + var index int = 0 + + // "Forward" declarations are needed for closures referring each other + var parseElement func() string + var parseSimpleType func() string + var parseQuotedString func() string + var parseObject func() string + var parseArray func() string + + var globalError error = nil + + checkError := func(e error) { + // We only honor the first reported error + if globalError == nil { + globalError = e + } + } + + setError := func(msg string) { + checkError(errors.New(msg)) + } + + isWhiteSpace := func(c byte) bool { + return c == 0x20 || c == 0x0a || c == 0x0d || c == 0x09 + } + + nextChar := func() byte { + if index < jsonDataLength { + c := jsonData[index] + if c > 0x7f { + setError("Unexpected non-ASCII character") + } + index++ + return c + } + setError("Unexpected EOF reached") + return '"' + } + + scan := func() byte { + for { + c := nextChar() + if isWhiteSpace(c) { + continue; + } + return c + } + } + + scanFor := func(expected byte) { + c := scan() + if c != expected { + setError("Expected '" + string(expected) + "' but got '" + string(c) + "'") + } + } + + getUEscape := func() rune { + start := index + nextChar() + nextChar() + nextChar() + nextChar() + if globalError != nil { + return 0 + } + u16, err := strconv.ParseUint(string(jsonData[start:index]), 16, 64) + checkError(err) + return rune(u16) + } + + testNextNonWhiteSpaceChar := func() byte { + save := index + c := scan() + index = save + return c + } + + decorateString := func(rawUTF8 string) string { + var quotedString strings.Builder + quotedString.WriteByte('"') + CoreLoop: + for _, c := range []byte(rawUTF8) { + // Is this within the JSON standard escapes? 
+ for i, esc := range binaryEscapes { + if esc == c { + quotedString.WriteByte('\\') + quotedString.WriteByte(asciiEscapes[i]) + continue CoreLoop + } + } + if c < 0x20 { + // Other ASCII control characters must be escaped with \uhhhh + quotedString.WriteString(fmt.Sprintf("\\u%04x", c)) + } else { + quotedString.WriteByte(c) + } + } + quotedString.WriteByte('"') + return quotedString.String() + } + + parseQuotedString = func() string { + var rawString strings.Builder + CoreLoop: + for globalError == nil { + var c byte + if index < jsonDataLength { + c = jsonData[index] + index++ + } else { + nextChar() + break + } + if (c == '"') { + break; + } + if c < ' ' { + setError("Unterminated string literal") + } else if c == '\\' { + // Escape sequence + c = nextChar() + if c == 'u' { + // The \u escape + firstUTF16 := getUEscape() + if utf16.IsSurrogate(firstUTF16) { + // If the first UTF-16 code unit has a certain value there must be + // another succeeding UTF-16 code unit as well + if nextChar() != '\\' || nextChar() != 'u' { + setError("Missing surrogate") + } else { + // Output the UTF-32 code point as UTF-8 + rawString.WriteRune(utf16.DecodeRune(firstUTF16, getUEscape())) + } + } else { + // Single UTF-16 code identical to UTF-32. Output as UTF-8 + rawString.WriteRune(firstUTF16) + } + } else if c == '/' { + // Benign but useless escape + rawString.WriteByte('/') + } else { + // The JSON standard escapes + for i, esc := range asciiEscapes { + if esc == c { + rawString.WriteByte(binaryEscapes[i]) + continue CoreLoop + } + } + setError("Unexpected escape: \\" + string(c)) + } + } else { + // Just an ordinary ASCII character alternatively a UTF-8 byte + // outside of ASCII. + // Note that properly formatted UTF-8 never clashes with ASCII + // making byte per byte search for ASCII break characters work + // as expected. + rawString.WriteByte(c) + } + } + return rawString.String() + } + + parseSimpleType = func() string { + var token strings.Builder + index-- + for globalError == nil { + c := testNextNonWhiteSpaceChar() + if c == ',' || c == ']' || c == '}' { + break; + } + c = nextChar() + if isWhiteSpace(c) { + break + } + token.WriteByte(c) + } + if token.Len() == 0 { + setError("Missing argument") + } + value := token.String() + // Is it a JSON literal? 
+ for _, literal := range literals { + if literal == value { + return literal + } + } + // Apparently not, so we assume that it is an I-JSON number + ieeeF64, err := strconv.ParseFloat(value, 64) + checkError(err) + value, err = NumberToJSON(ieeeF64) + checkError(err) + return value + } + + parseElement = func() string { + switch scan() { + case '{': + return parseObject() + case '"': + return decorateString(parseQuotedString()) + case '[': + return parseArray() + default: + return parseSimpleType() + } + } + + parseArray = func() string { + var arrayData strings.Builder + arrayData.WriteByte('[') + var next bool = false + for globalError == nil && testNextNonWhiteSpaceChar() != ']' { + if next { + scanFor(',') + arrayData.WriteByte(',') + } else { + next = true + } + arrayData.WriteString(parseElement()) + } + scan() + arrayData.WriteByte(']') + return arrayData.String() + } + + lexicographicallyPrecedes := func(sortKey []uint16, e *list.Element) bool { + // Find the minimum length of the sortKeys + oldSortKey := e.Value.(nameValueType).sortKey + minLength := len(oldSortKey) + if minLength > len(sortKey) { + minLength = len(sortKey) + } + for q := 0; q < minLength; q++ { + diff := int(sortKey[q]) - int(oldSortKey[q]) + if diff < 0 { + // Smaller => Precedes + return true + } else if diff > 0 { + // Bigger => No match + return false + } + // Still equal => Continue + } + // The sortKeys compared equal up to minLength + if len(sortKey) < len(oldSortKey) { + // Shorter => Precedes + return true + } + if len(sortKey) == len(oldSortKey) { + setError("Duplicate key: " + e.Value.(nameValueType).name) + } + // Longer => No match + return false + } + + parseObject = func() string { + nameValueList := list.New() + var next bool = false + CoreLoop: + for globalError == nil && testNextNonWhiteSpaceChar() != '}' { + if next { + scanFor(',') + } + next = true + scanFor('"') + rawUTF8 := parseQuotedString() + if globalError != nil { + break; + } + // Sort keys on UTF-16 code units + // Since UTF-8 doesn't have endianness this is just a value transformation + // In the Go case the transformation is UTF-8 => UTF-32 => UTF-16 + sortKey := utf16.Encode([]rune(rawUTF8)) + scanFor(':') + nameValue := nameValueType{rawUTF8, sortKey, parseElement()} + for e := nameValueList.Front(); e != nil; e = e.Next() { + // Check if the key is smaller than a previous key + if lexicographicallyPrecedes(sortKey, e) { + // Precedes => Insert before and exit sorting + nameValueList.InsertBefore(nameValue, e) + continue CoreLoop + } + // Continue searching for a possibly succeeding sortKey + // (which is straightforward since the list is ordered) + } + // The sortKey either is the first or succeeds all previous sortKeys + nameValueList.PushBack(nameValue) + } + // Scan away '}' + scan() + // Now everything is sorted so we can properly serialize the object + var objectData strings.Builder + objectData.WriteByte('{') + next = false + for e := nameValueList.Front(); e != nil; e = e.Next() { + if next { + objectData.WriteByte(',') + } + next = true + nameValue := e.Value.(nameValueType) + objectData.WriteString(decorateString(nameValue.name)) + objectData.WriteByte(':') + objectData.WriteString(nameValue.value) + } + objectData.WriteByte('}') + return objectData.String() + } + + ///////////////////////////////////////////////// + // This is where Transform actually begins...
// + ///////////////////////////////////////////////// + var transformed string + + if testNextNonWhiteSpaceChar() == '[' { + scan() + transformed = parseArray() + } else { + scanFor('{') + transformed = parseObject() + } + for index < jsonDataLength { + if !isWhiteSpace(jsonData[index]) { + setError("Improperly terminated JSON object") + break; + } + index++ + } + return []byte(transformed), globalError +} \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/.dockerignore b/vendor/github.com/docker/distribution/.dockerignore deleted file mode 100644 index e660fd93d31..00000000000 --- a/vendor/github.com/docker/distribution/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -bin/ diff --git a/vendor/github.com/docker/distribution/.golangci.yml b/vendor/github.com/docker/distribution/.golangci.yml deleted file mode 100644 index 36c083b0fc4..00000000000 --- a/vendor/github.com/docker/distribution/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters: - enable: - - structcheck - - varcheck - - staticcheck - - unconvert - - gofmt - - goimports - - golint - - ineffassign - - vet - - unused - - misspell - disable: - - errcheck - -run: - deadline: 2m - skip-dirs: - - vendor - -issues: - exclude-rules: - # io/ioutil is deprecated, but won't be removed until Go v2. It's safe to ignore for the release/2.8 branch. - - text: "SA1019: \"io/ioutil\" has been deprecated since Go 1.16" - linters: - - staticcheck diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap deleted file mode 100644 index d94c3936e02..00000000000 --- a/vendor/github.com/docker/distribution/.mailmap +++ /dev/null @@ -1,51 +0,0 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita -Yu Wang yuwaMSFT2 -Yu Wang Yu Wang (UC) -Olivier Gambier dmp -Olivier Gambier Olivier -Olivier Gambier Olivier -Elsan Li 李楠 elsanli(李楠) -Rui Cao ruicao -Gwendolynne Barr gbarr01 -Haibing Zhou 周海兵 zhouhaibing089 -Feng Honglin tifayuki -Helen Xie Helen-xie -Mike Brown Mike Brown -Manish Tomar Manish Tomar -Sakeven Jiang sakeven -Milos Gajdos Milos Gajdos -Derek McGowan Derek McGowa -Adrian Plata Adrian Plata <@users.noreply.github.com> -Sebastiaan van Stijn Sebastiaan van Stijn -Vishesh Jindal Vishesh Jindal -Wang Yan Wang Yan -Chris Patterson Chris Patterson -Eohyung Lee Eohyung Lee -João Pereira <484633+joaodrp@users.noreply.github.com> -Smasherr Smasherr -Thomas Berger Thomas Berger -Samuel Karp Samuel Karp -Justin Cormack -sayboras -CrazyMax <1951866+crazy-max@users.noreply.github.com> -Hayley Swimelar -Jose D. Gomez R -Shengjing Zhu -Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md deleted file mode 100644 index 2981d016b0d..00000000000 --- a/vendor/github.com/docker/distribution/BUILDING.md +++ /dev/null @@ -1,117 +0,0 @@ - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. 
- -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. - -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/golang/lint/golint - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... 
- ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendor -directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry --version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md deleted file mode 100644 index 4c067d9e7ec..00000000000 --- a/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ /dev/null @@ -1,148 +0,0 @@ -# Contributing to the registry - -## Before reporting an issue... - -### If your problem is with... - - - automated builds - - your account on the [Docker Hub](https://hub.docker.com/) - - any other [Docker Hub](https://hub.docker.com/) issue - -Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) - -### If you... - - - need help setting up your registry - - can't figure out something - - are not sure what's going on or what your problem is - -Then please do not open an issue here yet - you should first try one of the following support forums: - - - irc: #docker-distribution on freenode - - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution - -### Reporting security issues - -The Docker maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue, instead send your report privately to -[security@docker.com](mailto:security@docker.com). - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. - - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. 
create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. copy the output of: - - `docker version` - - `docker info` - - `docker exec registry --version` - 3. copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing a patch for a known bug, or a small correction - -You should follow the basic GitHub workflow: - - 1. fork - 2. commit a change - 3. make sure the tests pass - 4. PR - -Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: - - - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - - sign your commits using `-s`: `git commit -s -m "My commit"` - -Some simple rules to ensure quick merge: - - - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - - if you need to amend your PR following comments, please squash instead of adding more commits - -## Contributing new features - -You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. - -If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. - -Then you should submit your implementation, clearly linking to the issue (and possible proposal). - -Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. - -It's mandatory to: - - - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - - address maintainers' comments and modify your submission accordingly - - write tests for any new code - -Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. - -Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. 
All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile deleted file mode 100644 index fb54b68138d..00000000000 --- a/vendor/github.com/docker/distribution/Dockerfile +++ /dev/null @@ -1,60 +0,0 @@ -# syntax=docker/dockerfile:1 - -ARG GO_VERSION=1.19.9 -ARG ALPINE_VERSION=3.16 -ARG XX_VERSION=1.2.1 - -FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx -FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base -COPY --from=xx / / -RUN apk add --no-cache bash coreutils file git -ENV GO111MODULE=auto -ENV CGO_ENABLED=0 -WORKDIR /go/src/github.com/docker/distribution - -FROM base AS version -ARG PKG="github.com/docker/distribution" -RUN --mount=target=. \ - VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! 
git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \ - echo "-X ${PKG}/version.Version=${VERSION#v} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \ - echo -n "${VERSION}" | tee /tmp/.version; - -FROM base AS build -ARG TARGETPLATFORM -ARG LDFLAGS="-s -w" -ARG BUILDTAGS="include_oss include_gcs" -RUN --mount=type=bind,target=/go/src/github.com/docker/distribution,rw \ - --mount=type=cache,target=/root/.cache/go-build \ - --mount=target=/go/pkg/mod,type=cache \ - --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \ - set -x ; xx-go build -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \ - && xx-verify --static /usr/bin/registry - -FROM scratch AS binary -COPY --from=build /usr/bin/registry / - -FROM base AS releaser -ARG TARGETOS -ARG TARGETARCH -ARG TARGETVARIANT -WORKDIR /work -RUN --mount=from=binary,target=/build \ - --mount=type=bind,target=/src \ - --mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=version \ - VERSION=$(cat /tmp/.version) \ - && mkdir -p /out \ - && cp /build/registry /src/README.md /src/LICENSE . \ - && tar -czvf "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" * \ - && sha256sum -z "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" | awk '{ print $1 }' > "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz.sha256" - -FROM scratch AS artifact -COPY --from=releaser /out / - -FROM alpine:${ALPINE_VERSION} -RUN apk add --no-cache ca-certificates -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml -COPY --from=binary /registry /bin/registry -VOLUME ["/var/lib/registry"] -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS deleted file mode 100644 index 3183620c57b..00000000000 --- a/vendor/github.com/docker/distribution/MAINTAINERS +++ /dev/null @@ -1,243 +0,0 @@ -# Distribution maintainers file -# -# This file describes who runs the docker/distribution project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# - -[Rules] - - [Rules.maintainers] - - title = "What is a maintainer?" - - text = """ -There are different types of maintainers, with different responsibilities, but -all maintainers have 3 things in common: - -1) They share responsibility in the project's success. -2) They have made a long-term, recurring time investment to improve the project. -3) They spend that time doing whatever needs to be done, not necessarily what -is the most interesting or fun. - -Maintainers are often under-appreciated, because their work is harder to appreciate. -It's easy to appreciate a really cool and technically advanced feature. It's harder -to appreciate the absence of bugs, the slow but steady improvement in stability, -or the reliability of a release process. But those things distinguish a good -project from a great one. -""" - - [Rules.reviewer] - - title = "What is a reviewer?" - - text = """ -A reviewer is a core role within the project. -They share in reviewing issues and pull requests and their LGTM count towards the -required LGTM count to merge a code change into the project. 
- -Reviewers are part of the organization but do not have write access. -Becoming a reviewer is a core aspect in the journey to becoming a maintainer. -""" - - [Rules.adding-maintainers] - - title = "How are maintainers added?" - - text = """ -Maintainers are first and foremost contributors that have shown they are -committed to the long term success of a project. Contributors wanting to become -maintainers are expected to be deeply involved in contributing code, pull -request review, and triage of issues in the project for more than three months. - -Just contributing does not make you a maintainer, it is about building trust -with the current maintainers of the project and being a person that they can -depend on and trust to make decisions in the best interest of the project. - -Periodically, the existing maintainers curate a list of contributors that have -shown regular activity on the project over the prior months. From this list, -maintainer candidates are selected and proposed on the maintainers mailing list. - -After a candidate has been announced on the maintainers mailing list, the -existing maintainers are given five business days to discuss the candidate, -raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing -list. Only maintainers of the repository that the candidate is proposed for are -allowed to vote. - -If a candidate is approved, a maintainer will contact the candidate to invite -the candidate to open a pull request that adds the contributor to the -MAINTAINERS file. The candidate becomes a maintainer once the pull request is -merged. -""" - - [Rules.stepping-down-policy] - - title = "Stepping down policy" - - text = """ -Life priorities, interests, and passions can change. If you're a maintainer but -feel you must remove yourself from the list, inform other maintainers that you -intend to step down, and if possible, help find someone to pick up your work. -At the very least, ensure your work can be continued where you left off. - -After you've informed other maintainers, create a pull request to remove -yourself from the MAINTAINERS file. -""" - - [Rules.inactive-maintainers] - - title = "Removal of inactive maintainers" - - text = """ -Similar to the procedure for adding new maintainers, existing maintainers can -be removed from the list if they do not show significant activity on the -project. Periodically, the maintainers review the list of maintainers and their -activity over the last three months. - -If a maintainer has shown insufficient activity over this period, a neutral -person will contact the maintainer to ask if they want to continue being -a maintainer. If the maintainer decides to step down as a maintainer, they -open a pull request to be removed from the MAINTAINERS file. - -If the maintainer wants to remain a maintainer, but is unable to perform the -required duties they can be removed with a vote of at least 66% of -the current maintainers. An e-mail is sent to the -mailing list, inviting maintainers of the project to vote. The voting period is -five business days. Issues related to a maintainer's performance should be -discussed with them among the other maintainers so that they are not surprised -by a pull request removing them. -""" - - [Rules.decisions] - - title = "How are decisions made?" - - text = """ -Short answer: EVERYTHING IS A PULL REQUEST. - -distribution is an open-source project with an open design philosophy. 
This means -that the repository is the source of truth for EVERY aspect of the project, -including its philosophy, design, road map, and APIs. *If it's part of the -project, it's in the repo. If it's in the repo, it's part of the project.* - -As a result, all decisions can be expressed as changes to the repository. An -implementation change is a change to the source code. An API change is a change -to the API specification. A philosophy change is a change to the philosophy -manifesto, and so on. - -All decisions affecting distribution, big and small, follow the same 3 steps: - -* Step 1: Open a pull request. Anyone can do this. - -* Step 2: Discuss the pull request. Anyone can do this. - -* Step 3: Merge or refuse the pull request. Who does this depends on the nature -of the pull request and which areas of the project it affects. -""" - - [Rules.DCO] - - title = "Helping contributors with the DCO" - - text = """ -The [DCO or `Sign your work`]( -https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work) -requirement is not intended as a roadblock or speed bump. - -Some distribution contributors are not as familiar with `git`, or have used a web -based editor, and thus asking them to `git commit --amend -s` is not the best -way forward. - -In this case, maintainers can update the commits based on clause (c) of the DCO. -The most trivial way for a contributor to allow the maintainer to do this, is to -add a DCO signature in a pull requests's comment, or a maintainer can simply -note that the change is sufficiently trivial that it does not substantially -change the existing contribution - i.e., a spelling change. - -When you add someone's DCO, please also add your own to keep a log. -""" - - [Rules."no direct push"] - - title = "I'm a maintainer. Should I make pull requests too?" - - text = """ -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. -""" - - [Rules.tsc] - - title = "Conflict Resolution and technical disputes" - - text = """ -distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters." - """ - - [Rules.meta] - - title = "How is this process changed?" - - text = "Just like everything else: by making a pull request :)" - -# Current project organization -[Org] - - [Org.Maintainers] - people = [ - "dmcgowan", - "dmp42", - "stevvooe", - ] - [Org.Reviewers] - people = [ - "manishtomar", - "caervs", - "davidswu", - "RobbKistler" - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.caervs] - Name = "Ryan Abrams" - Email = "rdabrams@gmail.com" - GitHub = "caervs" - - [people.davidswu] - Name = "David Wu" - Email = "dwu7401@gmail.com" - GitHub = "davidswu" - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@mcgstyle.net" - GitHub = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - GitHub = "dmp42" - - [people.manishtomar] - Name = "Manish Tomar" - Email = "manish.tomar@docker.com" - GitHub = "manishtomar" - - [people.RobbKistler] - Name = "Robb Kistler" - Email = "robb.kistler@docker.com" - GitHub = "RobbKistler" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile deleted file mode 100644 index 75e11820152..00000000000 --- a/vendor/github.com/docker/distribution/Makefile +++ /dev/null @@ -1,102 +0,0 @@ -# Root directory of the project (absolute path). -ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) -REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) - - -PKG=github.com/docker/distribution - -# Project packages. -PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) -INTEGRATION_PACKAGE=${PKG} -COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) - - -# Project binaries. -COMMANDS=registry digest registry-api-descriptor-template - -# Allow turning off function inlining and variable registerization -ifeq (${DISABLE_OPTIMIZATION},true) - GO_GCFLAGS=-gcflags "-N -l" - VERSION:="$(VERSION)-noopt" -endif - -WHALE = "+" - -# Go files -# -TESTFLAGS_RACE= -GOFILES=$(shell find . -type f -name '*.go') -GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) -GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' - -BINARIES=$(addprefix bin/,$(COMMANDS)) - -# Flags passed to `go test` -TESTFLAGS ?= -v $(TESTFLAGS_RACE) -TESTFLAGS_PARALLEL ?= 8 - -.PHONY: all build binaries check clean test test-race test-full integration coverage -.DEFAULT: all - -all: binaries - -# This only needs to be generated by hand when cutting full releases. 
-version/version.go: - @echo "$(WHALE) $@" - ./version/version.sh > $@ - -check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") - @echo "$(WHALE) $@" - @GO111MODULE=off golangci-lint run - -test: ## run tests, except integration test with test.short - @echo "$(WHALE) $@" - @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -test-race: ## run tests, except integration test with test.short and race - @echo "$(WHALE) $@" - @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -test-full: ## run tests, except integration tests - @echo "$(WHALE) $@" - @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -integration: ## run integration tests - @echo "$(WHALE) $@" - @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} - -coverage: ## generate coverprofiles from the unit tests - @echo "$(WHALE) $@" - @rm -f coverage.txt - @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null - @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ - go test ${GO_TAGS} ${TESTFLAGS} \ - -cover \ - -coverprofile=profile.out \ - -covermode=atomic $$pkg || exit; \ - if [ -f profile.out ]; then \ - cat profile.out >> coverage.txt; \ - rm profile.out; \ - fi; \ - done ) - -FORCE: - -# Build a binary from a cmd. -bin/%: cmd/% FORCE - @echo "$(WHALE) $@${BINARY_SUFFIX}" - @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< - -binaries: $(BINARIES) ## build binaries - @echo "$(WHALE) $@" - -build: - @echo "$(WHALE) $@" - @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) - -clean: ## clean up binaries - @echo "$(WHALE) $@" - @rm -f $(BINARIES) diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md deleted file mode 100644 index e513c18e969..00000000000 --- a/vendor/github.com/docker/distribution/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. - -This repository provides the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the -[docker/docker-registry](https://github.com/docker/docker-registry) -project with a new API design, focused around security and performance. - - - -[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) -[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) - -This repository contains the following components: - -|**Component** |Description | -|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | -| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. 
| -| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | - -### How does this integrate with Docker engine? - -This project should provide an implementation to a V2 API for use in the [Docker -core project](https://github.com/docker/docker). The API should be embeddable -and simplify the process of securely pulling and pushing content from `docker` -daemons. - -### What are the long term goals of the Distribution project? - -The _Distribution_ project has the further long term goal of providing a -secure tool chain for distributing content. The specifications, APIs and tools -should be as useful with Docker as they are without. - -Our goal is to design a professional grade and extensible content distribution -system that allow users to: - -* Enjoy an efficient, secured and reliable way to store, manage, package and - exchange content -* Hack/roll their own on top of healthy open-source components -* Implement their own home made solution through good specs, and solid - extensions mechanism. - -## More about Registry 2.0 - -The new registry implementation provides the following benefits: - -- faster push and pull -- new, more efficient implementation -- simplified deployment -- pluggable storage backend -- webhook notifications - -For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). - -### Who needs to deploy a registry? - -By default, Docker users pull images from Docker's public registry instance. -[Installing Docker](https://docs.docker.com/engine/installation/) gives users this -ability. Users can also push images to a repository on Docker's public registry, -if they have a [Docker Hub](https://hub.docker.com/) account. - -For some users and even companies, this default behavior is sufficient. For -others, it is not. - -For example, users with their own software products may want to maintain a -registry for private, company images. Also, you may wish to deploy your own -image repository for images used to test or in continuous integration. For these -use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) -may be the better choice. - -### Migration to Registry 2.0 - -For those who have previously deployed their own registry based on the Registry -1.0 implementation and wish to deploy a Registry 2.0 while retaining images, -data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator](https://github.com/docker/migrator). - -## Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute -issues, fixes, and patches to this project. If you are contributing code, see -the instructions for [building a development environment](BUILDING.md). - -## Support - -If any issues are encountered while using the _Distribution_ project, several -avenues are available for support: - - - - - - - - - - - - - - - - - - -
- IRC: #docker-distribution on FreeNode
- Issue Tracker: github.com/docker/distribution/issues
- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
- Mailing List: docker@dockerproject.org
- - -## License - -This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md deleted file mode 100644 index 701127afec6..00000000000 --- a/vendor/github.com/docker/distribution/ROADMAP.md +++ /dev/null @@ -1,267 +0,0 @@ -# Roadmap - -The Distribution Project consists of several components, some of which are -still being defined. This document defines the high-level goals of the -project, identifies the current components, and defines the release- -relationship to the Docker Platform. - -* [Distribution Goals](#distribution-goals) -* [Distribution Components](#distribution-components) -* [Project Planning](#project-planning): release-relationship to the Docker Platform. - -This road map is a living document, providing an overview of the goals and -considerations made in respect of the future of the project. - -## Distribution Goals - -- Replace the existing [docker registry](github.com/docker/docker-registry) - implementation as the primary implementation. -- Replace the existing push and pull code in the docker engine with the - distribution package. -- Define a strong data model for distributing docker images -- Provide a flexible distribution tool kit for use in the docker platform -- Unlock new distribution models - -## Distribution Components - -Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming -features and bugfixes for a component will be added to the relevant milestone. If a feature or -bugfix is not part of a milestone, it is currently unscheduled for -implementation. - -* [Registry](#registry) -* [Distribution Package](#distribution-package) - -*** - -### Registry - -The new Docker registry is the main portion of the distribution repository. -Registry 2.0 is the first release of the next-generation registry. This was -primarily focused on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), -with a focus on security and performance. - -Following from the Distribution project goals above, we have a set of goals -for registry v2 that we would like to follow in the design. New features -should be compared against these goals. - -#### Data Storage and Distribution First - -The registry's first goal is to provide a reliable, consistent storage -location for Docker images. The registry should only provide the minimal -amount of indexing required to fetch image data and no more. - -This means we should be selective in new features and API additions, including -those that may require expensive, ever growing indexes. Requests should be -servable in "constant time". - -#### Content Addressability - -All data objects used in the registry API should be content addressable. -Content identifiers should be secure and verifiable. This provides a secure, -reliable base from which to build more advanced content distribution systems. - -#### Content Agnostic - -In the past, changes to the image format would require large changes in Docker -and the Registry. By decoupling the distribution and image format, we can -allow the formats to progress without having to coordinate between the two. -This means that we should be focused on decoupling Docker from the registry -just as much as decoupling the registry from Docker. Such an approach will -allow us to unlock new distribution models that haven't been possible before. 
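The content-addressability goal above is easy to make concrete: an identifier is nothing more than a verifiable hash of the bytes, independent of what those bytes encode. A minimal sketch using only the Go standard library (the `contentAddress` helper is illustrative, not part of the registry code):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// contentAddress returns a registry-style identifier for an arbitrary
// blob. The identifier says nothing about the blob's format; any
// consumer can re-hash the bytes to verify it.
func contentAddress(blob []byte) string {
	return fmt.Sprintf("sha256:%x", sha256.Sum256(blob))
}

func main() {
	manifest := []byte(`{"mediaType":"application/vnd.example+json"}`)
	fmt.Println(contentAddress(manifest))
}
```

Because the identifier depends only on the bytes, the same mechanism serves manifests, layers, or any format introduced later.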
- -We can take this further by saying that the new registry should be content -agnostic. The registry provides a model of names, tags, manifests and content -addresses and that model can be used to work with content. - -#### Simplicity - -The new registry should be closer to a microservice component than its -predecessor. This means it should have a narrower API and a low number of -service dependencies. It should be easy to deploy. - -This means that other solutions should be explored before changing the API or -adding extra dependencies. If functionality is required, can it be added as an -extension or companion service. - -#### Extensibility - -The registry should provide extension points to add functionality. By keeping -the scope narrow, but providing the ability to add functionality. - -Features like search, indexing, synchronization and registry explorers fall -into this category. No such feature should be added unless we've found it -impossible to do through an extension. - -#### Active Feature Discussions - -The following are feature discussions that are currently active. - -If you don't see your favorite, unimplemented feature, feel free to contact us -via IRC or the mailing list and we can talk about adding it. The goal here is -to make sure that new features go through a rigid design process before -landing in the registry. - -##### Proxying to other Registries - -A _pull-through caching_ mode exists for the registry, but is restricted from -within the docker client to only mirror the official Docker Hub. This functionality -can be expanded when image provenance has been specified and implemented in the -distribution project. - -##### Metadata storage - -Metadata for the registry is currently stored with the manifest and layer data on -the storage backend. While this is a big win for simplicity and reliably maintaining -state, it comes with the cost of consistency and high latency. The mutable registry -metadata operations should be abstracted behind an API which will allow ACID compliant -storage systems to handle metadata. - -##### Peer to Peer transfer - -Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit - -##### Indexing, Search and Discovery - -The original registry provided some implementation of search for use with -private registries. Support has been elided from V2 since we'd like to both -decouple search functionality from the registry. The makes the registry -simpler to deploy, especially in use cases where search is not needed, and -let's us decouple the image format from the registry. - -There are explorations into using the catalog API and notification system to -build external indexes. The current line of thought is that we will define a -common search API to index and query docker images. Such a system could be run -as a companion to a registry or set of registries to power discovery. - -The main issue with search and discovery is that there are so many ways to -accomplish it. There are two aspects to this project. The first is deciding on -how it will be done, including an API definition that can work with changing -data formats. The second is the process of integrating with `docker search`. -We expect that someone attempts to address the problem with the existing tools -and propose it as a standard search API or uses it to inform a standardization -process. Once this has been explored, we integrate with the docker client. 
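As a rough illustration of the catalog-driven approach, an external indexer only needs to page through the standard `/v2/_catalog` endpoint. The sketch below is deliberately naive, with no auth and no `Link`-header pagination, and `walkCatalog` is a hypothetical name:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// catalogPage mirrors the body returned by GET /v2/_catalog.
type catalogPage struct {
	Repositories []string `json:"repositories"`
}

// walkCatalog feeds every repository name to index, paging with the
// standard n/last query parameters.
func walkCatalog(base string, index func(string)) error {
	last := ""
	for {
		url := fmt.Sprintf("%s/v2/_catalog?n=100", base)
		if last != "" {
			url += "&last=" + last
		}
		resp, err := http.Get(url)
		if err != nil {
			return err
		}
		var page catalogPage
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return err
		}
		if len(page.Repositories) == 0 {
			return nil // no more entries
		}
		for _, repo := range page.Repositories {
			index(repo)
		}
		last = page.Repositories[len(page.Repositories)-1]
	}
}

func main() {
	_ = walkCatalog("http://localhost:5000", func(repo string) { fmt.Println(repo) })
}
```

A real indexer would also subscribe to the notification system so the index stays current between full walks.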
- -Please see the following for more detail: - -- https://github.com/docker/distribution/issues/206 - -##### Deletes - -> __NOTE:__ Deletes are a much asked for feature. Before requesting this -feature or participating in discussion, we ask that you read this section in -full and understand the problems behind deletes. - -While, at first glance, implementing deleting seems simple, there are a number -mitigating factors that make many solutions not ideal or even pathological in -the context of a registry. The following paragraph discuss the background and -approaches that could be applied to arrive at a solution. - -The goal of deletes in any system is to remove unused or unneeded data. Only -data requested for deletion should be removed and no other data. Removing -unintended data is worse than _not_ removing data that was requested for -removal but ideally, both are supported. Generally, according to this rule, we -err on holding data longer than needed, ensuring that it is only removed when -we can be certain that it can be removed. With the current behavior, we opt to -hold onto the data forever, ensuring that data cannot be incorrectly removed. - -To understand the problems with implementing deletes, one must understand the -data model. All registry data is stored in a filesystem layout, implemented on -a "storage driver", effectively a _virtual file system_ (VFS). The storage -system must assume that this VFS layer will be eventually consistent and has -poor read- after-write consistency, since this is the lower common denominator -among the storage drivers. This is mitigated by writing values in reverse- -dependent order, but makes wider transactional operations unsafe. - -Layered on the VFS model is a content-addressable _directed, acyclic graph_ -(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. -Since the same data can be referenced by multiple manifests, we only store -data once, even if it is in different repositories. Thus, we have a set of -blobs, referenced by tags and manifests. If we want to delete a blob we need -to be certain that it is no longer referenced by another manifest or tag. When -we delete a manifest, we also can try to delete the referenced blobs. Deciding -whether or not a blob has an active reference is the crux of the problem. - -Conceptually, deleting a manifest and its resources is quite simple. Just find -all the manifests, enumerate the referenced blobs and delete the blobs not in -that set. An astute observer will recognize this as a garbage collection -problem. As with garbage collection in programming languages, this is very -simple when one always has a consistent view. When one adds parallelism and an -inconsistent view of data, it becomes very challenging. - -A simple example can demonstrate this. Let's say we are deleting a manifest -_A_ in one process. We scan the manifest and decide that all the blobs are -ready for deletion. Concurrently, we have another process accepting a new -manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ -is accepted and all the blobs are considered present, so the operation -proceeds. The original process then deletes the referenced blobs, assuming -they were unreferenced. The manifest _B_, which we thought had all of its data -present, can no longer be served by the registry, since the dependent data has -been deleted. - -Deleting data from the registry safely requires some way to coordinate this -operation. 
The following approaches are being considered: - -- _Reference Counting_ - Maintain a count of references to each blob. This is - challenging for a number of reasons: 1. maintaining a consistent consensus - of reference counts across a set of Registries and 2. Building the initial - list of reference counts for an existing registry. These challenges can be - met with a consensus protocol like Paxos or Raft in the first case and a - necessary but simple scan in the second.. -- _Lock the World GC_ - Halt all writes to the data store. Walk the data store - and find all blob references. Delete all unreferenced blobs. This approach - is very simple but requires disabling writes for a period of time while the - service reads all data. This is slow and expensive but very accurate and - effective. -- _Generational GC_ - Do something similar to above but instead of blocking - writes, writes are sent to another storage backend while reads are broadcast - to the new and old backends. GC is then performed on the read-only portion. - Because writes land in the new backend, the data in the read-only section - can be safely deleted. The main drawbacks of this approach are complexity - and coordination. -- _Centralized Oracle_ - Using a centralized, transactional database, we can - know exactly which data is referenced at any given time. This avoids - coordination problem by managing this data in a single location. We trade - off metadata scalability for simplicity and performance. This is a very good - option for most registry deployments. This would create a bottleneck for - registry metadata. However, metadata is generally not the main bottleneck - when serving images. - -Please let us know if other solutions exist that we have yet to enumerate. -Note that for any approach, implementation is a massive consideration. For -example, a mark-sweep based solution may seem simple but the amount of work in -coordination offset the extra work it might take to build a _Centralized -Oracle_. We'll accept proposals for any solution but please coordinate with us -before dropping code. - -At this time, we have traded off simplicity and ease of deployment for disk -space. Simplicity and ease of deployment tend to reduce developer involvement, -which is currently the most expensive resource in software engineering. Taking -on any solution for deletes will greatly effect these factors, trading off -very cheap disk space for a complex deployment and operational story. - -Please see the following issues for more detail: - -- https://github.com/docker/distribution/issues/422 -- https://github.com/docker/distribution/issues/461 -- https://github.com/docker/distribution/issues/462 - -### Distribution Package - -At its core, the Distribution Project is a set of Go packages that make up -Distribution Components. At this time, most of these packages make up the -Registry implementation. - -The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. - -For feature additions, please see the Registry section. In the future, we may break out a -separate Roadmap for distribution-specific features that apply to more than -just the registry. - -*** - -### Project Planning - -An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. 
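To make the deletes discussion above concrete, the "conceptually simple" mark-and-sweep pass looks roughly like the sketch below. The `stores` type is a hypothetical stand-in for the real storage driver, and the pass is only correct if nothing writes concurrently, which is precisely the hard part:

```go
package main

import "fmt"

// manifest is a minimal stand-in: just the digests of the blobs it references.
type manifest struct{ blobs []string }

// stores is a toy model of registry storage, manifests and blobs keyed
// by digest. The real implementation sits on an eventually consistent
// virtual filesystem, which is what breaks this approach in practice.
type stores struct {
	manifests map[string]manifest
	blobs     map[string][]byte
}

// markAndSweep deletes every blob that no manifest references.
func (s *stores) markAndSweep() {
	marked := make(map[string]bool)
	for _, m := range s.manifests {
		for _, dgst := range m.blobs {
			marked[dgst] = true // mark: walk the DAG from the manifests
		}
	}
	for dgst := range s.blobs {
		if !marked[dgst] {
			delete(s.blobs, dgst) // sweep: unreferenced blobs go away
			fmt.Println("deleted", dgst)
		}
	}
}

func main() {
	s := &stores{
		manifests: map[string]manifest{"sha256:m1": {blobs: []string{"sha256:b1"}}},
		blobs:     map[string][]byte{"sha256:b1": nil, "sha256:b2": nil},
	}
	s.markAndSweep() // removes only the unreferenced sha256:b2
}
```

Run concurrently with uploads, the same pass exhibits exactly the race described above: a manifest accepted mid-sweep can reference blobs the sweep has already condemned.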
- diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go deleted file mode 100644 index 2a659eaa368..00000000000 --- a/vendor/github.com/docker/distribution/blobs.go +++ /dev/null @@ -1,265 +0,0 @@ -package distribution - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/docker/distribution/reference" - "github.com/opencontainers/go-digest" - v1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -var ( - // ErrBlobExists returned when blob already exists - ErrBlobExists = errors.New("blob exists") - - // ErrBlobDigestUnsupported when blob digest is an unsupported version. - ErrBlobDigestUnsupported = errors.New("unsupported blob digest") - - // ErrBlobUnknown when blob is not found. - ErrBlobUnknown = errors.New("unknown blob") - - // ErrBlobUploadUnknown returned when upload is not found. - ErrBlobUploadUnknown = errors.New("blob upload unknown") - - // ErrBlobInvalidLength returned when the blob has an expected length on - // commit, meaning mismatched with the descriptor or an invalid value. - ErrBlobInvalidLength = errors.New("blob invalid length") -) - -// ErrBlobInvalidDigest returned when digest check fails. -type ErrBlobInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrBlobInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} - -// ErrBlobMounted returned when a blob is mounted from another repository -// instead of initiating an upload session. -type ErrBlobMounted struct { - From reference.Canonical - Descriptor Descriptor -} - -func (err ErrBlobMounted) Error() string { - return fmt.Sprintf("blob mounted from: %v to: %v", - err.From, err.Descriptor) -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // This should only be used when referring to a manifest. - Platform *v1.Platform `json:"platform,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// Descriptor returns the descriptor, to make it satisfy the Describable -// interface. Note that implementations of Describable are generally objects -// which can be described, not simply descriptors; this exception is in place -// to make it more convenient to pass actual descriptors to functions that -// expect Describable objects. -func (d Descriptor) Descriptor() Descriptor { - return d -} - -// BlobStatter makes blob descriptors available by digest. 
The service may -// provide a descriptor of a different digest if the provided digest is not -// canonical. -type BlobStatter interface { - // Stat provides metadata about a blob identified by the digest. If the - // blob is unknown to the describer, ErrBlobUnknown will be returned. - Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) -} - -// BlobDeleter enables deleting blobs from storage. -type BlobDeleter interface { - Delete(ctx context.Context, dgst digest.Digest) error -} - -// BlobEnumerator enables iterating over blobs from storage -type BlobEnumerator interface { - Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error -} - -// BlobDescriptorService manages metadata about a blob by digest. Most -// implementations will not expose such an interface explicitly. Such mappings -// should be maintained by interacting with the BlobIngester. Hence, this is -// left off of BlobService and BlobStore. -type BlobDescriptorService interface { - BlobStatter - - // SetDescriptor assigns the descriptor to the digest. The provided digest and - // the digest in the descriptor must map to identical content but they may - // differ on their algorithm. The descriptor must have the canonical - // digest of the content and the digest algorithm must match the - // annotators canonical algorithm. - // - // Such a facility can be used to map blobs between digest domains, with - // the restriction that the algorithm of the descriptor must match the - // canonical algorithm (ie sha256) of the annotator. - SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error - - // Clear enables descriptors to be unlinked - Clear(ctx context.Context, dgst digest.Digest) error -} - -// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. -type BlobDescriptorServiceFactory interface { - BlobAccessController(svc BlobDescriptorService) BlobDescriptorService -} - -// ReadSeekCloser is the primary reader type for blob data, combining -// io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// BlobProvider describes operations for getting blob data. -type BlobProvider interface { - // Get returns the entire blob identified by digest along with the descriptor. - Get(ctx context.Context, dgst digest.Digest) ([]byte, error) - - // Open provides a ReadSeekCloser to the blob identified by the provided - // descriptor. If the blob is not known to the service, an error will be - // returned. - Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) -} - -// BlobServer can serve blobs via http. -type BlobServer interface { - // ServeBlob attempts to serve the blob, identified by dgst, via http. The - // service may decide to redirect the client elsewhere or serve the data - // directly. - // - // This handler only issues successful responses, such as 2xx or 3xx, - // meaning it serves data or issues a redirect. If the blob is not - // available, an error will be returned and the caller may still issue a - // response. - // - // The implementation may serve the same blob from a different digest - // domain. The appropriate headers will be set for the blob, unless they - // have already been set by the caller. - ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error -} - -// BlobIngester ingests blob data. -type BlobIngester interface { - // Put inserts the content p into the blob service, returning a descriptor - // or an error. 
- Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) - - // Create allocates a new blob writer to add a blob to this service. The - // returned handle can be written to and later resumed using an opaque - // identifier. With this approach, one can Close and Resume a BlobWriter - // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) - - // Resume attempts to resume a write to a blob, identified by an id. - Resume(ctx context.Context, id string) (BlobWriter, error) -} - -// BlobCreateOption is a general extensible function argument for blob creation -// methods. A BlobIngester may choose to honor any or none of the given -// BlobCreateOptions, which can be specific to the implementation of the -// BlobIngester receiving them. -// TODO (brianbland): unify this with ManifestServiceOption in the future -type BlobCreateOption interface { - Apply(interface{}) error -} - -// CreateOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type CreateOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - // Stat allows to pass precalculated descriptor to link and return. - // Blob access check will be skipped if set. - Stat *Descriptor - } -} - -// BlobWriter provides a handle for inserting data into a blob store. -// Instances should be obtained from BlobWriteService.Writer and -// BlobWriteService.Resume. If supported by the store, a writer can be -// recovered with the id. -type BlobWriter interface { - io.WriteCloser - io.ReaderFrom - - // Size returns the number of bytes written to this blob. - Size() int64 - - // ID returns the identifier for this writer. The ID can be used with the - // Blob service to later resume the write. - ID() string - - // StartedAt returns the time this blob write was started. - StartedAt() time.Time - - // Commit completes the blob writer process. The content is verified - // against the provided provisional descriptor, which may result in an - // error. Depending on the implementation, written data may be validated - // against the provisional descriptor fields. If MediaType is not present, - // the implementation may reject the commit or assign "application/octet- - // stream" to the blob. The returned descriptor may have a different - // digest depending on the blob store, referred to as the canonical - // descriptor. - Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) - - // Cancel ends the blob write without storing any data and frees any - // associated resources. Any data written thus far will be lost. Cancel - // implementations should allow multiple calls even after a commit that - // result in a no-op. This allows use of Cancel in a defer statement, - // increasing the assurance that it is correctly called. - Cancel(ctx context.Context) error -} - -// BlobService combines the operations to access, read and write blobs. This -// can be used to describe remote blob services. -type BlobService interface { - BlobStatter - BlobProvider - BlobIngester -} - -// BlobStore represent the entire suite of blob related operations. Such an -// implementation can access, read, write, delete and serve blobs. 
-type BlobStore interface { - BlobService - BlobServer - BlobDeleter -} diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go deleted file mode 100644 index bdd8cb708e5..00000000000 --- a/vendor/github.com/docker/distribution/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package distribution will define the interfaces for the components of -// docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. -// -// This is currently a work in progress. More details are available in the -// README.md. -package distribution diff --git a/vendor/github.com/docker/distribution/docker-bake.hcl b/vendor/github.com/docker/distribution/docker-bake.hcl deleted file mode 100644 index 91686e608a9..00000000000 --- a/vendor/github.com/docker/distribution/docker-bake.hcl +++ /dev/null @@ -1,56 +0,0 @@ -group "default" { - targets = ["image-local"] -} - -// Special target: https://github.com/docker/metadata-action#bake-definition -target "docker-metadata-action" { - tags = ["registry:local"] -} - -target "binary" { - target = "binary" - output = ["./bin"] -} - -target "artifact" { - target = "artifact" - output = ["./bin"] -} - -target "artifact-all" { - inherits = ["artifact"] - platforms = [ - "linux/amd64", - "linux/arm/v6", - "linux/arm/v7", - "linux/arm64", - "linux/ppc64le", - "linux/s390x" - ] -} - -// Special target: https://github.com/docker/metadata-action#bake-definition -target "docker-metadata-action" { - tags = ["registry:local"] -} - -target "image" { - inherits = ["docker-metadata-action"] -} - -target "image-local" { - inherits = ["image"] - output = ["type=docker"] -} - -target "image-all" { - inherits = ["image"] - platforms = [ - "linux/amd64", - "linux/arm/v6", - "linux/arm/v7", - "linux/arm64", - "linux/ppc64le", - "linux/s390x" - ] -} diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go deleted file mode 100644 index 8e0b788d6c5..00000000000 --- a/vendor/github.com/docker/distribution/errors.go +++ /dev/null @@ -1,119 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -// ErrAccessDenied is returned when an access to a requested resource is -// denied. -var ErrAccessDenied = errors.New("access denied") - -// ErrManifestNotModified is returned when a conditional manifest GetByTag -// returns nil due to the client indicating it has the latest version -var ErrManifestNotModified = errors.New("manifest not modified") - -// ErrUnsupported is returned when an unimplemented or unsupported action is -// performed -var ErrUnsupported = errors.New("operation unsupported") - -// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1 -// manifest but the registry is configured to reject it -var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported") - -// ErrTagUnknown is returned if the given tag is not known by the tag service -type ErrTagUnknown struct { - Tag string -} - -func (err ErrTagUnknown) Error() string { - return fmt.Sprintf("unknown tag=%s", err.Tag) -} - -// ErrRepositoryUnknown is returned if the named repository is not known by -// the registry. -type ErrRepositoryUnknown struct { - Name string -} - -func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown repository name=%s", err.Name) -} - -// ErrRepositoryNameInvalid should be used to denote an invalid repository -// name. 
Reason may set, indicating the cause of invalidity. -type ErrRepositoryNameInvalid struct { - Name string - Reason error -} - -func (err ErrRepositoryNameInvalid) Error() string { - return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) -} - -// ErrManifestUnknown is returned if the manifest is not known by the -// registry. -type ErrManifestUnknown struct { - Name string - Tag string -} - -func (err ErrManifestUnknown) Error() string { - return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) -} - -// ErrManifestUnknownRevision is returned when a manifest cannot be found by -// revision within a repository. -type ErrManifestUnknownRevision struct { - Name string - Revision digest.Digest -} - -func (err ErrManifestUnknownRevision) Error() string { - return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) -} - -// ErrManifestUnverified is returned when the registry is unable to verify -// the manifest. -type ErrManifestUnverified struct{} - -func (ErrManifestUnverified) Error() string { - return "unverified manifest" -} - -// ErrManifestVerification provides a type to collect errors encountered -// during manifest verification. Currently, it accepts errors of all types, -// but it may be narrowed to those involving manifest verification. -type ErrManifestVerification []error - -func (errs ErrManifestVerification) Error() string { - var parts []string - for _, err := range errs { - parts = append(parts, err.Error()) - } - - return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) -} - -// ErrManifestBlobUnknown returned when a referenced blob cannot be found. -type ErrManifestBlobUnknown struct { - Digest digest.Digest -} - -func (err ErrManifestBlobUnknown) Error() string { - return fmt.Sprintf("unknown blob %v on manifest", err.Digest) -} - -// ErrManifestNameInvalid should be used to denote an invalid manifest -// name. Reason may set, indicating the cause of invalidity. -type ErrManifestNameInvalid struct { - Name string - Reason error -} - -func (err ErrManifestNameInvalid) Error() string { - return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) -} diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go deleted file mode 100644 index 8f84a220a97..00000000000 --- a/vendor/github.com/docker/distribution/manifests.go +++ /dev/null @@ -1,125 +0,0 @@ -package distribution - -import ( - "context" - "fmt" - "mime" - - "github.com/opencontainers/go-digest" -) - -// Manifest represents a registry object specifying a set of -// references and an optional target -type Manifest interface { - // References returns a list of objects which make up this manifest. - // A reference is anything which can be represented by a - // distribution.Descriptor. These can consist of layers, resources or other - // manifests. - // - // While no particular order is required, implementations should return - // them from highest to lowest priority. For example, one might want to - // return the base layer before the top layer. - References() []Descriptor - - // Payload provides the serialized format of the manifest, in addition to - // the media type. - Payload() (mediaType string, payload []byte, err error) -} - -// ManifestBuilder creates a manifest allowing one to include dependencies. -// Instances can be obtained from a version-specific manifest package. Manifest -// specific data is passed into the function which creates the builder. 
-type ManifestBuilder interface { - // Build creates the manifest from his builder. - Build(ctx context.Context) (Manifest, error) - - // References returns a list of objects which have been added to this - // builder. The dependencies are returned in the order they were added, - // which should be from base to head. - References() []Descriptor - - // AppendReference includes the given object in the manifest after any - // existing dependencies. If the add fails, such as when adding an - // unsupported dependency, an error may be returned. - // - // The destination of the reference is dependent on the manifest type and - // the dependency type. - AppendReference(dependency Describable) error -} - -// ManifestService describes operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(ctx context.Context, dgst digest.Digest) (bool, error) - - // Get retrieves the manifest specified by the given digest - Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest - Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) - - // Delete removes the manifest specified by the given digest. Deleting - // a manifest that doesn't exist will return ErrManifestNotFound - Delete(ctx context.Context, dgst digest.Digest) error -} - -// ManifestEnumerator enables iterating over manifests -type ManifestEnumerator interface { - // Enumerate calls ingester for each manifest. - Enumerate(ctx context.Context, ingester func(digest.Digest) error) error -} - -// Describable is an interface for descriptors -type Describable interface { - Descriptor() Descriptor -} - -// ManifestMediaTypes returns the supported media types for manifests. -func ManifestMediaTypes() (mediaTypes []string) { - for t := range mappings { - if t != "" { - mediaTypes = append(mediaTypes, t) - } - } - return -} - -// UnmarshalFunc implements manifest unmarshalling a given MediaType -type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) - -var mappings = make(map[string]UnmarshalFunc) - -// UnmarshalManifest looks up manifest unmarshal functions based on -// MediaType -func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { - // Need to look up by the actual media type, not the raw contents of - // the header. Strip semicolons and anything following them. - var mediaType string - if ctHeader != "" { - var err error - mediaType, _, err = mime.ParseMediaType(ctHeader) - if err != nil { - return nil, Descriptor{}, err - } - } - - unmarshalFunc, ok := mappings[mediaType] - if !ok { - unmarshalFunc, ok = mappings[""] - if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) - } - } - - return unmarshalFunc(p) -} - -// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. 
This -// should be called from specific -func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { - if _, ok := mappings[mediaType]; ok { - return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) - } - mappings[mediaType] = u - return nil -} diff --git a/vendor/github.com/docker/distribution/metrics/prometheus.go b/vendor/github.com/docker/distribution/metrics/prometheus.go deleted file mode 100644 index b5a5321448a..00000000000 --- a/vendor/github.com/docker/distribution/metrics/prometheus.go +++ /dev/null @@ -1,13 +0,0 @@ -package metrics - -import "github.com/docker/go-metrics" - -const ( - // NamespacePrefix is the namespace of prometheus metrics - NamespacePrefix = "registry" -) - -var ( - // StorageNamespace is the prometheus namespace of blob/cache related operations - StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil) -) diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go deleted file mode 100644 index 6c32109894d..00000000000 --- a/vendor/github.com/docker/distribution/registry.go +++ /dev/null @@ -1,118 +0,0 @@ -package distribution - -import ( - "context" - - "github.com/docker/distribution/reference" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. - Contains(name string) bool -} - -type fullScope struct{} - -func (f fullScope) Contains(string) bool { - return true -} - -// GlobalScope represents the full namespace scope which contains -// all other scopes. -var GlobalScope = Scope(fullScope{}) - -// Namespace represents a collection of repositories, addressable by name. -// Generally, a namespace is backed by a set of one or more services, -// providing facilities such as registry access, trust, and indexing. -type Namespace interface { - // Scope describes the names that can be used with this Namespace. The - // global namespace will have a scope that matches all names. The scope - // effectively provides an identity for the namespace. - Scope() Scope - - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name reference.Named) (Repository, error) - - // Repositories fills 'repos' with a lexicographically sorted catalog of repositories - // up to the size of 'repos' and returns the value 'n' for the number of entries - // which were filled. 'last' contains an offset in the catalog, and 'err' will be - // set to io.EOF if there are no more entries to obtain. 
- Repositories(ctx context.Context, repos []string, last string) (n int, err error) - - // Blobs returns a blob enumerator to access all blobs - Blobs() BlobEnumerator - - // BlobStatter returns a BlobStatter to control - BlobStatter() BlobStatter -} - -// RepositoryEnumerator describes an operation to enumerate repositories -type RepositoryEnumerator interface { - Enumerate(ctx context.Context, ingester func(string) error) error -} - -// RepositoryRemover removes given repository -type RepositoryRemover interface { - Remove(ctx context.Context, name reference.Named) error -} - -// ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption interface { - Apply(ManifestService) error -} - -// WithTag allows a tag to be passed into Put -func WithTag(tag string) ManifestServiceOption { - return WithTagOption{tag} -} - -// WithTagOption holds a tag -type WithTagOption struct{ Tag string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithTagOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// WithManifestMediaTypes lists the media types the client wishes -// the server to provide. -func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { - return WithManifestMediaTypesOption{mediaTypes} -} - -// WithManifestMediaTypesOption holds a list of accepted media types -type WithManifestMediaTypesOption struct{ MediaTypes []string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Named returns the name of the repository. - Named() reference.Named - - // Manifests returns a reference to this repository's manifest service. - // with the supplied options applied. - Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) - - // Blobs returns a reference to this repository's blob service. - Blobs(ctx context.Context) BlobStore - - // TODO(stevvooe): The above BlobStore return can probably be relaxed to - // be a BlobService for use with clients. This will allow such - // implementations to avoid implementing ServeBlob. - - // Tags returns a reference to this repositories tag service - Tags(ctx context.Context) TagService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go deleted file mode 100644 index 695bf852f16..00000000000 --- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ /dev/null @@ -1,162 +0,0 @@ -package client - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/docker/distribution" -) - -type httpBlobUpload struct { - statter distribution.BlobStatter - client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. 
- offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time { - return hbu.startedAt -} - -func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { - return nil - } - return hbu.handleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) Close() error { - hbu.closed = true - return nil -} diff 
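The blob writer deleted above drives the registry's resumable upload protocol: each chunk is PATCHed to the upload location with a Content-Range header, the server acknowledges progress in the Range response header, and a final PUT with `?digest=...` commits the blob. A reduced wire-level sketch of one chunk, assuming only net/http; `uploadURL` and the surrounding orchestration are left out, and this is not the deleted type's API:

```go
package blobupload

import (
	"bytes"
	"fmt"
	"net/http"
)

// patchChunk sends one chunk of a resumable blob upload and returns the
// next offset (parsed from the Range response header) plus the Location
// for the following request, mirroring the Write path removed above.
func patchChunk(c *http.Client, uploadURL string, offset int64, chunk []byte) (int64, string, error) {
	req, err := http.NewRequest(http.MethodPatch, uploadURL, bytes.NewReader(chunk))
	if err != nil {
		return 0, "", err
	}
	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", offset, offset+int64(len(chunk))-1))
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := c.Do(req)
	if err != nil {
		return 0, "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return 0, "", fmt.Errorf("unexpected status %s", resp.Status)
	}

	// The registry echoes the committed range back as "start-end".
	var start, end int64
	if n, err := fmt.Sscanf(resp.Header.Get("Range"), "%d-%d", &start, &end); err != nil || n != 2 || end < start {
		return 0, "", fmt.Errorf("bad Range header %q", resp.Header.Get("Range"))
	}
	// "Location" is where the next chunk (or the final PUT) must go.
	return end + 1, resp.Header.Get("Location"), nil
}
```

A final PUT to the returned Location with the canonical digest in the query string completes the upload, which is what the Commit method above does.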
--git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go deleted file mode 100644 index 04e5a3ba01f..00000000000 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ /dev/null @@ -1,870 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - v2 "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/opencontainers/go-digest" -) - -// Registry provides an interface for calling Repositories, which returns a catalog of repositories. -type Registry interface { - Repositories(ctx context.Context, repos []string, last string) (n int, err error) -} - -// checkHTTPRedirect is a callback that can manipulate redirected HTTP -// requests. It is used to preserve Accept and Range headers. -func checkHTTPRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - - if len(via) > 0 { - for headerName, headerVals := range via[0].Header { - if headerName != "Accept" && headerName != "Range" { - continue - } - for _, val := range headerVals { - // Don't add to redirected request if redirected - // request already has a header with the same - // name and value. - hasValue := false - for _, existingVal := range req.Header[headerName] { - if existingVal == val { - hasValue = true - break - } - } - if !hasValue { - req.Header.Add(headerName, val) - } - } - } - } - - return nil -} - -// NewRegistry creates a registry namespace which can be used to get a listing of repositories -func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - CheckRedirect: checkHTTPRedirect, - } - - return ®istry{ - client: client, - ub: ub, - }, nil -} - -type registry struct { - client *http.Client - ub *v2.URLBuilder -} - -// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size -// of the slice, starting at the value provided in 'last'. 
The number of entries will be returned along with io.EOF if there -// are no more entries -func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error - - values := buildCatalogValues(len(entries), last) - u, err := r.ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := r.client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - copy(entries, ctlg.Repositories) - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) - } - - return numFilled, returnErr -} - -// NewRepository creates a new Repository for the given repository name and base URL. -func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - name: r.Named(), - } -} - -// tags implements remote tagging operations. -type tags struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - listURLStr, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - listURL, err := url.Parse(listURLStr) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(listURL.String()) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) 
- if link := resp.Header.Get("Link"); link != "" { - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) - if err != nil { - return tags, err - } - - listURL = listURL.ResolveReference(linkURL) - } else { - return tags, nil - } - } else { - return tags, HandleErrorResponse(resp) - } - } -} - -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} - headers := response.Header - - ctHeader := headers.Get("Content-Type") - if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") - } - desc.MediaType = ctHeader - - digestHeader := headers.Get("Docker-Content-Digest") - if digestHeader == "" { - bytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return distribution.Descriptor{}, err - } - _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - dgst, err := digest.Parse(digestHeader) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Digest = dgst - - lengthHeader := headers.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") - } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Size = length - - return desc, nil - -} - -// Get issues a HEAD request for a Manifest against its named endpoint in order -// to construct a descriptor for the tag. If the registry doesn't support HEADing -// a manifest, fallback to GET. -func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - ref, err := reference.WithTag(t.name, tag) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := t.ub.BuildManifestURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - newRequest := func(method string) (*http.Response, error) { - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - resp, err := t.client.Do(req) - return resp, err - } - - resp, err := newRequest("HEAD") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: - // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers - return descriptorFromResponse(resp) - default: - // if the response is an error - there will be no body to decode. 
- // Issue a GET request: - // - for data from a server that does not handle HEAD - // - to get error details in case of a failure - resp, err = newRequest("GET") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 200 && resp.StatusCode < 400 { - return descriptorFromResponse(resp) - } - return distribution.Descriptor{}, HandleErrorResponse(resp) - } -} - -func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - panic("not implemented") -} - -func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - panic("not implemented") -} - -func (t *tags) Untag(ctx context.Context, tag string) error { - panic("not implemented") -} - -type manifests struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - etags map[string]string -} - -func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return false, err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return false, err - } - - resp, err := ms.client.Head(u) - if err != nil { - return false, err - } - - if SuccessStatus(resp.StatusCode) { - return true, nil - } else if resp.StatusCode == http.StatusNotFound { - return false, nil - } - return false, HandleErrorResponse(resp) -} - -// AddEtagToTag allows a client to supply an eTag to Get which will be -// used for a conditional HTTP request. If the eTag matches, a nil manifest -// and ErrManifestNotModified error will be returned. etag is automatically -// quoted when added to this map. -func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return etagOption{tag, etag} -} - -type etagOption struct{ tag, etag string } - -func (o etagOption) Apply(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) - return nil - } - return fmt.Errorf("etag options is a client-only option") -} - -// ReturnContentDigest allows a client to set a the content digest on -// a successful request from the 'Docker-Content-Digest' header. This -// returned digest is represents the digest which the registry uses -// to refer to the content and can be used to delete the content. 
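As the comment above explains, the tag lookup prefers HEAD (no manifest body on the wire) and only falls back to GET when the digest header is missing, the server cannot HEAD manifests, or an error body is needed. The probe pattern in isolation, with `manifestURL` and the Accept list as caller-supplied placeholders:

```go
package probe

import (
	"fmt"
	"net/http"
)

// headThenGet issues a HEAD request and falls back to GET when the
// response is unusable, mirroring the tag-descriptor lookup above.
// The caller must close the returned response body.
func headThenGet(c *http.Client, manifestURL string, accept []string) (*http.Response, error) {
	do := func(method string) (*http.Response, error) {
		req, err := http.NewRequest(method, manifestURL, nil)
		if err != nil {
			return nil, err
		}
		for _, mt := range accept {
			req.Header.Add("Accept", mt)
		}
		return c.Do(req)
	}

	resp, err := do(http.MethodHead)
	if err != nil {
		return nil, err
	}
	// Usable HEAD: success status plus a digest header to build a descriptor from.
	if resp.StatusCode >= 200 && resp.StatusCode < 400 && resp.Header.Get("Docker-Content-Digest") != "" {
		return resp, nil
	}
	resp.Body.Close()

	// Fall back to GET for servers that do not HEAD manifests, and to
	// obtain error details when the request actually failed.
	resp, err = do(http.MethodGet)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode >= 400 {
		resp.Body.Close()
		return nil, fmt.Errorf("manifest probe failed: %s", resp.Status)
	}
	return resp, nil
}
```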
-func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - mediaTypes []string - ) - - for _, option := range options { - switch opt := option.(type) { - case distribution.WithTagOption: - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - case contentDigestOption: - contentDgst = opt.digest - case distribution.WithManifestMediaTypesOption: - mediaTypes = opt.MediaTypes - default: - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - if len(mediaTypes) == 0 { - mediaTypes = distribution.ManifestMediaTypes() - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range mediaTypes { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. 
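The option handling in Get and Put is a functional-options dispatch: options the service recognizes are unpacked by a type switch, and everything else gets its generic Apply hook. A condensed replica of that shape, with illustrative names rather than the deleted package's types:

```go
package opts

import "fmt"

// service is a toy stand-in for the manifests type above.
type service struct{}

// Option mirrors ManifestServiceOption: either a well-known value the
// service unpacks itself, or a generic Apply hook.
type Option interface{ Apply(*service) error }

type withTag struct{ tag string }

// Apply is a no-op: the service consumes withTag via the type switch.
func (withTag) Apply(*service) error { return nil }

// WithTag mimics distribution.WithTag.
func WithTag(tag string) Option { return withTag{tag} }

// Put shows the dispatch shape used by the manifest Put path above.
func (s *service) Put(payload []byte, options ...Option) (string, error) {
	target := "" // empty means "push by canonical digest"
	for _, o := range options {
		switch o := o.(type) {
		case withTag:
			target = o.tag // well-known option: read its value directly
		default:
			if err := o.Apply(s); err != nil {
				return "", err
			}
		}
	}
	if target == "" {
		// Hypothetical stand-in, not a real digest computation.
		target = fmt.Sprintf("sha256:%x-placeholder", len(payload))
	}
	return target, nil
}
```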
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.Parse(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.Digester() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*distribution.CreateOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts distribution.CreateOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
- if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - resp, err := bs.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go deleted file mode 100644 index 9120dbed666..00000000000 --- 
a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ /dev/null @@ -1,249 +0,0 @@ -package transport - -import ( - "errors" - "fmt" - "io" - "net/http" - "regexp" - "strconv" -) - -var ( - contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) - - // ErrWrongCodeForByteRange is returned if the client sends a request - // with a Range header but the server returns a 2xx or 3xx code other - // than 206 Partial Content. - ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") -) - -// ReadSeekCloser combines io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET -// request. When seeking and starting a read from a non-zero offset -// the a "Range" header will be added which sets the offset. -// TODO(dmcgowan): Move this into a separate utility package -func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { - return &httpReadSeeker{ - client: client, - url: url, - errorHandler: errorHandler, - } -} - -type httpReadSeeker struct { - client *http.Client - url string - - // errorHandler creates an error from an unsuccessful HTTP response. - // This allows the error to be created with the HTTP response body - // without leaking the body through a returned error. - errorHandler func(*http.Response) error - - size int64 - - // rc is the remote read closer. - rc io.ReadCloser - // readerOffset tracks the offset as of the last read. - readerOffset int64 - // seekOffset allows Seek to override the offset. Seek changes - // seekOffset instead of changing readOffset directly so that - // connection resets can be delayed and possibly avoided if the - // seek is undone (i.e. seeking to the end and then back to the - // beginning). - seekOffset int64 - err error -} - -func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { - if hrs.err != nil { - return 0, hrs.err - } - - // If we sought to a different position, we need to reset the - // connection. This logic is here instead of Seek so that if - // a seek is undone before the next read, the connection doesn't - // need to be closed and reopened. A common example of this is - // seeking to the end to determine the length, and then seeking - // back to the original position. - if hrs.readerOffset != hrs.seekOffset { - hrs.reset() - } - - hrs.readerOffset = hrs.seekOffset - - rd, err := hrs.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hrs.seekOffset += int64(n) - hrs.readerOffset += int64(n) - - return n, err -} - -func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { - if hrs.err != nil { - return 0, hrs.err - } - - lastReaderOffset := hrs.readerOffset - - if whence == io.SeekStart && hrs.rc == nil { - // If no request has been made yet, and we are seeking to an - // absolute position, set the read offset as well to avoid an - // unnecessary request. 
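The seek bookkeeping described in the comments above is the interesting part of httpReadSeeker: Seek only records the target offset, and the connection is torn down and re-issued as a ranged GET only when a Read actually observes a mismatch, so seek-to-end-then-back (the usual size probe) costs no round trip. The same trick in miniature, a sketch supporting only io.SeekStart:

```go
package rangereader

import (
	"fmt"
	"io"
	"net/http"
)

// resumingReader re-issues a ranged GET only when a Read happens at a
// different offset than the open connection, the optimization above.
type resumingReader struct {
	client  *http.Client
	url     string
	rc      io.ReadCloser
	offset  int64 // offset of the open body's next byte
	wantOff int64 // offset most recently requested via Seek
}

func (r *resumingReader) Read(p []byte) (int, error) {
	if r.rc == nil || r.offset != r.wantOff {
		if r.rc != nil {
			r.rc.Close() // seek moved: drop the stale connection lazily, here
			r.rc = nil
		}
		req, err := http.NewRequest(http.MethodGet, r.url, nil)
		if err != nil {
			return 0, err
		}
		if r.wantOff > 0 {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-", r.wantOff))
		}
		resp, err := r.client.Do(req)
		if err != nil {
			return 0, err
		}
		if r.wantOff > 0 && resp.StatusCode != http.StatusPartialContent {
			resp.Body.Close()
			return 0, fmt.Errorf("expected 206, got %s", resp.Status)
		}
		r.rc, r.offset = resp.Body, r.wantOff
	}
	n, err := r.rc.Read(p)
	r.offset += int64(n)
	r.wantOff = r.offset
	return n, err
}

// Seek only records the target; no network traffic happens until Read.
func (r *resumingReader) Seek(offset int64, whence int) (int64, error) {
	if whence != io.SeekStart {
		return 0, fmt.Errorf("only io.SeekStart supported in this sketch")
	}
	r.wantOff = offset
	return offset, nil
}
```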
- hrs.readerOffset = offset - } - - _, err := hrs.reader() - if err != nil { - hrs.readerOffset = lastReaderOffset - return 0, err - } - - newOffset := hrs.seekOffset - - switch whence { - case io.SeekCurrent: - newOffset += offset - case io.SeekEnd: - if hrs.size < 0 { - return 0, errors.New("content length not known") - } - newOffset = hrs.size + offset - case io.SeekStart: - newOffset = offset - } - - if newOffset < 0 { - err = errors.New("cannot seek to negative position") - } else { - hrs.seekOffset = newOffset - } - - return hrs.seekOffset, err -} - -func (hrs *httpReadSeeker) Close() error { - if hrs.err != nil { - return hrs.err - } - - // close and release reader chain - if hrs.rc != nil { - hrs.rc.Close() - } - - hrs.rc = nil - - hrs.err = errors.New("httpLayer: closed") - - return nil -} - -func (hrs *httpReadSeeker) reset() { - if hrs.err != nil { - return - } - if hrs.rc != nil { - hrs.rc.Close() - hrs.rc = nil - } -} - -func (hrs *httpReadSeeker) reader() (io.Reader, error) { - if hrs.err != nil { - return nil, hrs.err - } - - if hrs.rc != nil { - return hrs.rc, nil - } - - req, err := http.NewRequest("GET", hrs.url, nil) - if err != nil { - return nil, err - } - - if hrs.readerOffset > 0 { - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) - // TODO: get context in here - // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) - } - - resp, err := hrs.client.Do(req) - if err != nil { - return nil, err - } - - // Normally would use client.SuccessStatus, but that would be a cyclic - // import - if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - if hrs.readerOffset > 0 { - if resp.StatusCode != http.StatusPartialContent { - return nil, ErrWrongCodeForByteRange - } - - contentRange := resp.Header.Get("Content-Range") - if contentRange == "" { - return nil, errors.New("no Content-Range header found in HTTP 206 response") - } - - submatches := contentRangeRegexp.FindStringSubmatch(contentRange) - if len(submatches) < 4 { - return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) - } - - startByte, err := strconv.ParseUint(submatches[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) - } - - if startByte != uint64(hrs.readerOffset) { - return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) - } - - endByte, err := strconv.ParseUint(submatches[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) - } - - if submatches[3] == "*" { - hrs.size = -1 - } else { - size, err := strconv.ParseUint(submatches[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) - } - - if endByte+1 != size { - return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) - } - - hrs.size = int64(size) - } - } else if resp.StatusCode == http.StatusOK { - hrs.size = resp.ContentLength - } else { - hrs.size = -1 - } - hrs.rc = resp.Body - } else { - defer resp.Body.Close() - if hrs.errorHandler != nil { - return nil, hrs.errorHandler(resp) - } - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - return hrs.rc, nil -} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go 
b/vendor/github.com/docker/distribution/registry/client/transport/transport.go deleted file mode 100644 index 30e45fab0f7..00000000000 --- a/vendor/github.com/docker/distribution/registry/client/transport/transport.go +++ /dev/null @@ -1,147 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "sync" -) - -// RequestModifier represents an object which will do an inplace -// modification of an HTTP request. -type RequestModifier interface { - ModifyRequest(*http.Request) error -} - -type headerModifier http.Header - -// NewHeaderRequestModifier returns a new RequestModifier which will -// add the given headers to a request. -func NewHeaderRequestModifier(header http.Header) RequestModifier { - return headerModifier(header) -} - -func (h headerModifier) ModifyRequest(req *http.Request) error { - for k, s := range http.Header(h) { - req.Header[k] = append(req.Header[k], s...) - } - - return nil -} - -// NewTransport creates a new transport which will apply modifiers to -// the request on a RoundTrip call. -func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { - return &transport{ - Modifiers: modifiers, - Base: base, - } -} - -// transport is an http.RoundTripper that makes HTTP requests after -// copying and modifying the request -type transport struct { - Modifiers []RequestModifier - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - req2 := cloneRequest(req) - for _, modifier := range t.Modifiers { - if err := modifier.ModifyRequest(req2); err != nil { - return nil, err - } - } - - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
- } - - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go deleted file mode 100644 index 10a3909197c..00000000000 --- a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package cache provides facilities to speed up access to the storage -// backend. -package cache - -import ( - "fmt" - - "github.com/docker/distribution" -) - -// BlobDescriptorCacheProvider provides repository scoped -// BlobDescriptorService cache instances and a global descriptor cache. -type BlobDescriptorCacheProvider interface { - distribution.BlobDescriptorService - - RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) -} - -// ValidateDescriptor provides a helper function to ensure that caches have -// common criteria for admitting descriptors. -func ValidateDescriptor(desc distribution.Descriptor) error { - if err := desc.Digest.Validate(); err != nil { - return err - } - - if desc.Size < 0 { - return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) - } - - if desc.MediaType == "" { - return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go deleted file mode 100644 index ac4c452117d..00000000000 --- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go +++ /dev/null @@ -1,129 +0,0 @@ -package cache - -import ( - "context" - - "github.com/docker/distribution" - prometheus "github.com/docker/distribution/metrics" - "github.com/opencontainers/go-digest" -) - -// Metrics is used to hold metric counters -// related to the number of times a cache was -// hit or missed. -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 -} - -// Logger can be provided on the MetricsTracker to log errors. -// -// Usually, this is just a proxy to dcontext.GetLogger. -type Logger interface { - Errorf(format string, args ...interface{}) -} - -// MetricsTracker represents a metric tracker -// which simply counts the number of hits and misses. -type MetricsTracker interface { - Hit() - Miss() - Metrics() Metrics - Logger(context.Context) Logger -} - -type cachedBlobStatter struct { - cache distribution.BlobDescriptorService - backend distribution.BlobDescriptorService - tracker MetricsTracker -} - -var ( - // cacheCount is the number of total cache request received/hits/misses - cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") -) - -// NewCachedBlobStatter creates a new statter which prefers a cache and -// falls back to a backend. 
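The cachedBlobStatter in the file being removed here is a textbook read-through cache: consult the cache, fall back to the backend on a miss, then repopulate best-effort. Its control flow, reduced to a sketch where a real Descriptor is simplified to a string:

```go
package readthrough

import "context"

// statter is the minimal surface shared by cache and backend.
type statter interface {
	Stat(ctx context.Context, key string) (string, error)
	Set(ctx context.Context, key, value string) error
}

// cached prefers the cache and falls back to the backend, the same
// control flow as cachedBlobStatter.Stat in the deleted file.
type cached struct{ cache, backend statter }

func (c cached) Stat(ctx context.Context, key string) (string, error) {
	if v, err := c.cache.Stat(ctx, key); err == nil {
		return v, nil // cache hit
	}
	// Cache miss or cache failure: either way the backend is the source
	// of truth (the original only logs unexpected cache errors).
	v, err := c.backend.Stat(ctx, key)
	if err != nil {
		return "", err
	}
	// Repopulate best-effort; a failed Set must not fail the read.
	_ = c.cache.Set(ctx, key, v)
	return v, nil
}
```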
-func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - } -} - -// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and -// falls back to a backend. Hits and misses will send to the tracker. -func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - tracker: tracker, - } -} - -func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - cacheCount.WithValues("Request").Inc(1) - desc, err := cbds.cache.Stat(ctx, dgst) - if err != nil { - if err != distribution.ErrBlobUnknown { - logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err) - } - - goto fallback - } - cacheCount.WithValues("Hit").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Hit() - } - return desc, nil -fallback: - cacheCount.WithValues("Miss").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Miss() - } - desc, err = cbds.backend.Stat(ctx, dgst) - if err != nil { - return desc, err - } - - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) - } - - return desc, err - -} - -func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - err := cbds.cache.Clear(ctx, dgst) - if err != nil { - return err - } - - err = cbds.backend.Clear(ctx, dgst) - if err != nil { - return err - } - return nil -} - -func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) - } - return nil -} - -func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) { - if tracker == nil { - return - } - - logger := tracker.Logger(ctx) - if logger == nil { - return - } - logger.Errorf(format, args...) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go deleted file mode 100644 index 42d94d9bde6..00000000000 --- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ /dev/null @@ -1,179 +0,0 @@ -package memory - -import ( - "context" - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" - "github.com/opencontainers/go-digest" -) - -type inMemoryBlobDescriptorCacheProvider struct { - global *mapBlobDescriptorCache - repositories map[string]*mapBlobDescriptorCache - mu sync.RWMutex -} - -// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for -// storing blob descriptor data. 
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { - return &inMemoryBlobDescriptorCacheProvider{ - global: newMapBlobDescriptorCache(), - repositories: make(map[string]*mapBlobDescriptorCache), - } -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNormalizedNamed(repo); err != nil { - return nil, err - } - - imbdcp.mu.RLock() - defer imbdcp.mu.RUnlock() - - return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, - parent: imbdcp, - repository: imbdcp.repositories[repo], - }, nil -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return imbdcp.global.Stat(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { - return imbdcp.global.Clear(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - _, err := imbdcp.Stat(ctx, dgst) - if err == distribution.ErrBlobUnknown { - - if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { - // if the digests differ, set the other canonical mapping - if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { - return err - } - } - - // unknown, just set it - return imbdcp.global.SetDescriptor(ctx, dgst, desc) - } - - // we already know it, do nothing - return err -} - -// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped -// repository cache. Instances are not thread-safe but the delegated -// operations are. -type repositoryScopedInMemoryBlobDescriptorCache struct { - repo string - parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map - repository *mapBlobDescriptorCache -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return repo.Stat(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.ErrBlobUnknown - } - - return repo.Clear(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - if repo == nil { - // allocate map since we are setting it now. - var ok bool - // have to read back value since we may have allocated elsewhere. - repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] - if !ok { - repo = newMapBlobDescriptorCache() - rsimbdcp.parent.repositories[rsimbdcp.repo] = repo - } - rsimbdcp.repository = repo - } - rsimbdcp.parent.mu.Unlock() - - if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { - return err - } - - return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) -} - -// mapBlobDescriptorCache provides a simple map-based implementation of the -// descriptor cache. 
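The repository-scoped cache above allocates its per-repository map lazily on the first write, re-reading the parent's map under the lock because another handle for the same repository may have allocated it first. That allocation pattern in isolation, with one lock where the original splits the work between the provider's RWMutex and each map's own lock:

```go
package lazymap

import "sync"

// parent owns the scope table, as the provider above owns 'repositories'.
type parent struct {
	mu     sync.Mutex
	scopes map[string]map[string]string
}

// scopedCache mirrors the repository-scoped cache: its map is allocated
// lazily on the first Set and then remembered on the handle.
type scopedCache struct {
	name string
	p    *parent
	m    map[string]string // nil until the first write for this scope
}

func (s *scopedCache) Set(key, value string) {
	s.p.mu.Lock()
	defer s.p.mu.Unlock()
	if s.m == nil {
		if s.p.scopes == nil {
			s.p.scopes = make(map[string]map[string]string)
		}
		// Read back before creating: another handle for the same scope
		// may have allocated the map already.
		m, ok := s.p.scopes[s.name]
		if !ok {
			m = make(map[string]string)
			s.p.scopes[s.name] = m
		}
		s.m = m
	}
	s.m[key] = value
}

func (s *scopedCache) Get(key string) (string, bool) {
	s.p.mu.Lock()
	defer s.p.mu.Unlock()
	if s.m == nil {
		s.m = s.p.scopes[s.name] // may still be nil: scope never written
	}
	if s.m == nil {
		return "", false
	}
	v, ok := s.m[key]
	return v, ok
}
```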
-type mapBlobDescriptorCache struct { - descriptors map[digest.Digest]distribution.Descriptor - mu sync.RWMutex -} - -var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} - -func newMapBlobDescriptorCache() *mapBlobDescriptorCache { - return &mapBlobDescriptorCache{ - descriptors: make(map[digest.Digest]distribution.Descriptor), - } -} - -func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - mbdc.mu.RLock() - defer mbdc.mu.RUnlock() - - desc, ok := mbdc.descriptors[dgst] - if !ok { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return desc, nil -} - -func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - delete(mbdc.descriptors, dgst) - return nil -} - -func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - mbdc.descriptors[dgst] = desc - return nil -} diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go deleted file mode 100644 index f22df2b850e..00000000000 --- a/vendor/github.com/docker/distribution/tags.go +++ /dev/null @@ -1,27 +0,0 @@ -package distribution - -import ( - "context" -) - -// TagService provides access to information about tagged objects. -type TagService interface { - // Get retrieves the descriptor identified by the tag. Some - // implementations may differentiate between "trusted" tags and - // "untrusted" tags. If a tag is "untrusted", the mapping will be returned - // as an ErrTagUntrusted error, with the target descriptor. - Get(ctx context.Context, tag string) (Descriptor, error) - - // Tag associates the tag with the provided descriptor, updating the - // current association, if needed. - Tag(ctx context.Context, tag string, desc Descriptor) error - - // Untag removes the given tag association - Untag(ctx context.Context, tag string) error - - // All returns the set of tags managed by this tag service - All(ctx context.Context) ([]string, error) - - // Lookup returns the set of tags referencing the given digest. 
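TagService, deleted below, is the read/write surface for tag metadata; the usual read-only consumption pairs All with Get to materialize a tag-to-target view. A sketch against the minimal subset of the interface, with descriptors reduced to digest strings:

```go
package tagview

import "context"

// tagGetter is the subset of TagService a read-only consumer needs.
type tagGetter interface {
	All(ctx context.Context) ([]string, error)
	Get(ctx context.Context, tag string) (string, error) // descriptor reduced to a digest string
}

// snapshot resolves every tag to its current target, the usual
// All-then-Get consumption pattern for the interface below.
func snapshot(ctx context.Context, ts tagGetter) (map[string]string, error) {
	tags, err := ts.All(ctx)
	if err != nil {
		return nil, err
	}
	view := make(map[string]string, len(tags))
	for _, t := range tags {
		d, err := ts.Get(ctx, t)
		if err != nil {
			return nil, err // a tag removed mid-walk surfaces here
		}
		view[t] = d
	}
	return view, nil
}
```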
- Lookup(ctx context.Context, digest Descriptor) ([]string, error) -} diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf deleted file mode 100644 index bd1b4bff61c..00000000000 --- a/vendor/github.com/docker/distribution/vendor.conf +++ /dev/null @@ -1,51 +0,0 @@ -github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b -github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 -github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 -github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490 -github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a -github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 -github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 -github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go 4bbdd8ac624fc7a9ef7aec841c43d99b5fe65a29 https://github.com/golang-jwt/jwt.git # v3.2.2 -github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab -github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 -github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 -github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c -github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 -github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b -github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f -github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 -github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c -github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 -github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef -github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 -github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 -github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c -github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 -github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd -github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd -github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 -github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 -github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e -github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 -github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 -golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b -golang.org/x/net 4876518f9e71663000c348837735820161a42df7 -golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 -google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 -google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 -google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 -gopkg.in/check.v1 
64131543e7896d5bcc6bd5a76287eb75ea96c673 -gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b -gopkg.in/yaml.v2 v2.2.1 -rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git -github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb -github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2 diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go index da8b594e7f8..91d9d4bbae9 100644 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go @@ -169,8 +169,8 @@ func Erase(helper Helper, reader io.Reader) error { return helper.Delete(serverURL) } -//List returns all the serverURLs of keys in -//the OS store as a list of strings +// List returns all the serverURLs of keys in +// the OS store as a list of strings func List(helper Helper, writer io.Writer) error { accts, err := helper.List() if err != nil { @@ -179,8 +179,8 @@ func List(helper Helper, writer io.Writer) error { return json.NewEncoder(writer).Encode(accts) } -//PrintVersion outputs the current version. +// PrintVersion outputs the current version. func PrintVersion(writer io.Writer) error { - fmt.Fprintln(writer, Version) + fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version) return nil } diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go index 185e367961a..84377c26309 100644 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go @@ -1,4 +1,16 @@ package credentials -// Version holds a string describing the current version -const Version = "0.6.4" +var ( + // Name is filled at linking time + Name = "" + + // Package is filled at linking time + Package = "github.com/docker/docker-credential-helpers" + + // Version holds the complete version number. Filled in at linking time. + Version = "v0.0.0+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" +) diff --git a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/vendor/github.com/docker/go-metrics/CONTRIBUTING.md deleted file mode 100644 index b8a512c3665..00000000000 --- a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md +++ /dev/null @@ -1,55 +0,0 @@ -# Contributing - -## Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/docker/go-metrics/LICENSE.docs deleted file mode 100644 index e26cd4fc8ed..00000000000 --- a/vendor/github.com/docker/go-metrics/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. 
More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. 
Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. 
- Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. 
- - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. 
upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/docker/go-metrics/NOTICE deleted file mode 100644 index 8915f02773f..00000000000 --- a/vendor/github.com/docker/go-metrics/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. 
-It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md deleted file mode 100644 index a9e947cb566..00000000000 --- a/vendor/github.com/docker/go-metrics/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics) - -This package is small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. - -## Best Practices - -This packages is meant to be used for collecting metrics in Docker projects. -It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected. -If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/). - -The following are a few Docker specific rules that will help you name and work with metrics in your project. - -1. Namespace and Subsystem - -This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics. - -```go -ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{ - "version": dockerversion.Version, - "commit": dockerversion.GitCommit, -}) -``` - -In the example above we are creating metrics for the Docker engine's daemon package. -`engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics. - -A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting. - -2. Declaring your Metrics - -Try to keep all your metric declarations in one file. -This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created. - -3. Use labels instead of multiple metrics - -Labels allow you to define one metric such as the time it takes to perform a certain action on an object. -If we wanted to collect timings on various container actions such as create, start, and delete then we can define one metric called `container_actions` and use labels to specify the type of action. - - -```go -containerActions = ns.NewLabeledTimer("container_actions", "The number of milliseconds it takes to process each container action", "action") -``` - -The last parameter is the label name or key. -When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for. - -```go -containerActions.WithValues("create").UpdateSince(start) -``` - -4. Always use a unit - -The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with. -For a timer, the standard unit is seconds and a counter's standard unit is a total. -For gauges you must provide the unit. -This package provides a standard set of units for use within the Docker projects. 
- -```go -Nanoseconds Unit = "nanoseconds" -Seconds Unit = "seconds" -Bytes Unit = "bytes" -Total Unit = "total" -``` - -If you need to use a unit but it is not defined in the package please open a PR to add it but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds. - -## Docs - -Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics). - -## HTTP Metrics - -To instrument a http handler, you can wrap the code like this: - -```go -namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"}) -httpMetrics := namespace.NewDefaultHttpMetrics() -metrics.Register(namespace) -instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler) -``` -Note: The `handler` label must be provided when a new namespace is created. - -## Additional Metrics - -Additional metrics are also defined here that are not available in the prometheus client. -If you need a custom metrics and it is generic enough to be used by multiple projects, define it here. - - -## Copyright and license - -Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/docker/go-metrics/counter.go deleted file mode 100644 index fe36316a45c..00000000000 --- a/vendor/github.com/docker/go-metrics/counter.go +++ /dev/null @@ -1,52 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Counter is a metrics that can only increment its current count -type Counter interface { - // Inc adds Sum(vs) to the counter. Sum(vs) must be positive. - // - // If len(vs) == 0, increments the counter by 1. - Inc(vs ...float64) -} - -// LabeledCounter is counter that must have labels populated before use. -type LabeledCounter interface { - WithValues(vs ...string) Counter -} - -type labeledCounter struct { - pc *prometheus.CounterVec -} - -func (lc *labeledCounter) WithValues(vs ...string) Counter { - return &counter{pc: lc.pc.WithLabelValues(vs...)} -} - -func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) { - lc.pc.Describe(ch) -} - -func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) { - lc.pc.Collect(ch) -} - -type counter struct { - pc prometheus.Counter -} - -func (c *counter) Inc(vs ...float64) { - if len(vs) == 0 { - c.pc.Inc() - } - - c.pc.Add(sumFloat64(vs...)) -} - -func (c *counter) Describe(ch chan<- *prometheus.Desc) { - c.pc.Describe(ch) -} - -func (c *counter) Collect(ch chan<- prometheus.Metric) { - c.pc.Collect(ch) -} diff --git a/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/docker/go-metrics/docs.go deleted file mode 100644 index 8fbdfc697d5..00000000000 --- a/vendor/github.com/docker/go-metrics/docs.go +++ /dev/null @@ -1,3 +0,0 @@ -// This package is small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. 
- -package metrics diff --git a/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/docker/go-metrics/gauge.go deleted file mode 100644 index 74296e87740..00000000000 --- a/vendor/github.com/docker/go-metrics/gauge.go +++ /dev/null @@ -1,72 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Gauge is a metric that allows incrementing and decrementing a value -type Gauge interface { - Inc(...float64) - Dec(...float64) - - // Add adds the provided value to the gauge's current value - Add(float64) - - // Set replaces the gauge's current value with the provided value - Set(float64) -} - -// LabeledGauge describes a gauge the must have values populated before use. -type LabeledGauge interface { - WithValues(labels ...string) Gauge -} - -type labeledGauge struct { - pg *prometheus.GaugeVec -} - -func (lg *labeledGauge) WithValues(labels ...string) Gauge { - return &gauge{pg: lg.pg.WithLabelValues(labels...)} -} - -func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) { - lg.pg.Describe(c) -} - -func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) { - lg.pg.Collect(c) -} - -type gauge struct { - pg prometheus.Gauge -} - -func (g *gauge) Inc(vs ...float64) { - if len(vs) == 0 { - g.pg.Inc() - } - - g.Add(sumFloat64(vs...)) -} - -func (g *gauge) Dec(vs ...float64) { - if len(vs) == 0 { - g.pg.Dec() - } - - g.Add(-sumFloat64(vs...)) -} - -func (g *gauge) Add(v float64) { - g.pg.Add(v) -} - -func (g *gauge) Set(v float64) { - g.pg.Set(v) -} - -func (g *gauge) Describe(c chan<- *prometheus.Desc) { - g.pg.Describe(c) -} - -func (g *gauge) Collect(c chan<- prometheus.Metric) { - g.pg.Collect(c) -} diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go deleted file mode 100644 index 05601e9ecd2..00000000000 --- a/vendor/github.com/docker/go-metrics/handler.go +++ /dev/null @@ -1,74 +0,0 @@ -package metrics - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -// HTTPHandlerOpts describes a set of configurable options of http metrics -type HTTPHandlerOpts struct { - DurationBuckets []float64 - RequestSizeBuckets []float64 - ResponseSizeBuckets []float64 -} - -const ( - InstrumentHandlerResponseSize = iota - InstrumentHandlerRequestSize - InstrumentHandlerDuration - InstrumentHandlerCounter - InstrumentHandlerInFlight -) - -type HTTPMetric struct { - prometheus.Collector - handlerType int -} - -var ( - defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60} - defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G - defaultResponseSizeBuckets = defaultRequestSizeBuckets -) - -// Handler returns the global http.Handler that provides the prometheus -// metrics format on GET requests. This handler is no longer instrumented. 
-func Handler() http.Handler { - return promhttp.Handler() -} - -func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(metrics, handler.ServeHTTP) -} - -func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc { - var handler http.Handler - handler = http.HandlerFunc(handlerFunc) - for _, metric := range metrics { - switch metric.handlerType { - case InstrumentHandlerResponseSize: - if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { - handler = promhttp.InstrumentHandlerResponseSize(collector, handler) - } - case InstrumentHandlerRequestSize: - if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { - handler = promhttp.InstrumentHandlerRequestSize(collector, handler) - } - case InstrumentHandlerDuration: - if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { - handler = promhttp.InstrumentHandlerDuration(collector, handler) - } - case InstrumentHandlerCounter: - if collector, ok := metric.Collector.(*prometheus.CounterVec); ok { - handler = promhttp.InstrumentHandlerCounter(collector, handler) - } - case InstrumentHandlerInFlight: - if collector, ok := metric.Collector.(prometheus.Gauge); ok { - handler = promhttp.InstrumentHandlerInFlight(collector, handler) - } - } - } - return handler.ServeHTTP -} diff --git a/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/docker/go-metrics/helpers.go deleted file mode 100644 index 68b7f51b338..00000000000 --- a/vendor/github.com/docker/go-metrics/helpers.go +++ /dev/null @@ -1,10 +0,0 @@ -package metrics - -func sumFloat64(vs ...float64) float64 { - var sum float64 - for _, v := range vs { - sum += v - } - - return sum -} diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go deleted file mode 100644 index 798315451a7..00000000000 --- a/vendor/github.com/docker/go-metrics/namespace.go +++ /dev/null @@ -1,315 +0,0 @@ -package metrics - -import ( - "fmt" - "sync" - - "github.com/prometheus/client_golang/prometheus" -) - -type Labels map[string]string - -// NewNamespace returns a namespaces that is responsible for managing a collection of -// metrics for a particual namespace and subsystem -// -// labels allows const labels to be added to all metrics created in this namespace -// and are commonly used for data like application version and git commit -func NewNamespace(name, subsystem string, labels Labels) *Namespace { - if labels == nil { - labels = make(map[string]string) - } - return &Namespace{ - name: name, - subsystem: subsystem, - labels: labels, - } -} - -// Namespace describes a set of metrics that share a namespace and subsystem. -type Namespace struct { - name string - subsystem string - labels Labels - mu sync.Mutex - metrics []prometheus.Collector -} - -// WithConstLabels returns a namespace with the provided set of labels merged -// with the existing constant labels on the namespace. -// -// Only metrics created with the returned namespace will get the new constant -// labels. The returned namespace must be registered separately. 
-func (n *Namespace) WithConstLabels(labels Labels) *Namespace { - n.mu.Lock() - ns := &Namespace{ - name: n.name, - subsystem: n.subsystem, - labels: mergeLabels(n.labels, labels), - } - n.mu.Unlock() - return ns -} - -func (n *Namespace) NewCounter(name, help string) Counter { - c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} - n.Add(c) - return c -} - -func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { - c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} - n.Add(c) - return c -} - -func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { - return prometheus.CounterOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, Total), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) NewTimer(name, help string) Timer { - t := &timer{ - m: prometheus.NewHistogram(n.newTimerOpts(name, help)), - } - n.Add(t) - return t -} - -func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { - t := &labeledTimer{ - m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), - } - n.Add(t) - return t -} - -func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { - return prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, Seconds), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { - g := &gauge{ - pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), - } - n.Add(g) - return g -} - -func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { - g := &labeledGauge{ - pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), - } - n.Add(g) - return g -} - -func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { - return prometheus.GaugeOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, unit), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { - n.mu.Lock() - defer n.mu.Unlock() - - for _, metric := range n.metrics { - metric.Describe(ch) - } -} - -func (n *Namespace) Collect(ch chan<- prometheus.Metric) { - n.mu.Lock() - defer n.mu.Unlock() - - for _, metric := range n.metrics { - metric.Collect(ch) - } -} - -func (n *Namespace) Add(collector prometheus.Collector) { - n.mu.Lock() - n.metrics = append(n.metrics, collector) - n.mu.Unlock() -} - -func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { - name = makeName(name, unit) - namespace := n.name - if n.subsystem != "" { - namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) - } - name = fmt.Sprintf("%s_%s", namespace, name) - return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) -} - -// mergeLabels merges two or more labels objects into a single map, favoring -// the later labels. 
-func mergeLabels(lbs ...Labels) Labels { - merged := make(Labels) - - for _, target := range lbs { - for k, v := range target { - merged[k] = v - } - } - - return merged -} - -func makeName(name string, unit Unit) string { - if unit == "" { - return name - } - - return fmt.Sprintf("%s_%s", name, unit) -} - -func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric { - return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ - DurationBuckets: defaultDurationBuckets, - RequestSizeBuckets: defaultResponseSizeBuckets, - ResponseSizeBuckets: defaultResponseSizeBuckets, - }) -} - -func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric { - return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ - DurationBuckets: durationBuckets, - RequestSizeBuckets: requestSizeBuckets, - ResponseSizeBuckets: responseSizeBuckets, - }) -} - -func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric { - var httpMetrics []*HTTPMetric - inFlightMetric := n.NewInFlightGaugeMetric(handlerName) - requestTotalMetric := n.NewRequestTotalMetric(handlerName) - requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets) - requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets) - responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets) - httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric) - return httpMetrics -} - -func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric { - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - metric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "in_flight_requests", - Help: "The in-flight HTTP requests", - ConstLabels: prometheus.Labels(labels), - }) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerInFlight, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric { - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - metric := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: prometheus.Labels(labels), - }, - []string{"code", "method"}, - ) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerCounter, - } - n.Add(httpMetric) - return httpMetric -} -func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("DurationBuckets must be provided") - } - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "request_duration_seconds", - Help: "The HTTP request latencies in seconds.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metric := prometheus.NewHistogramVec(opts, []string{"method"}) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerDuration, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("RequestSizeBuckets must be provided") - } - labels := 
prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "request_size_bytes", - Help: "The HTTP request sizes in bytes.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metric := prometheus.NewHistogramVec(opts, []string{}) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerRequestSize, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("ResponseSizeBuckets must be provided") - } - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "response_size_bytes", - Help: "The HTTP response sizes in bytes.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metrics := prometheus.NewHistogramVec(opts, []string{}) - httpMetric := &HTTPMetric{ - Collector: metrics, - handlerType: InstrumentHandlerResponseSize, - } - n.Add(httpMetric) - return httpMetric -} diff --git a/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/docker/go-metrics/register.go deleted file mode 100644 index 708358df01d..00000000000 --- a/vendor/github.com/docker/go-metrics/register.go +++ /dev/null @@ -1,15 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Register adds all the metrics in the provided namespace to the global -// metrics registry -func Register(n *Namespace) { - prometheus.MustRegister(n) -} - -// Deregister removes all the metrics in the provided namespace from the -// global metrics registry -func Deregister(n *Namespace) { - prometheus.Unregister(n) -} diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go deleted file mode 100644 index 824c98739cf..00000000000 --- a/vendor/github.com/docker/go-metrics/timer.go +++ /dev/null @@ -1,85 +0,0 @@ -package metrics - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// StartTimer begins a timer observation at the callsite. When the target -// operation is completed, the caller should call the return done func(). -func StartTimer(timer Timer) (done func()) { - start := time.Now() - return func() { - timer.Update(time.Since(start)) - } -} - -// Timer is a metric that allows collecting the duration of an action in seconds -type Timer interface { - // Update records an observation, duration, and converts to the target - // units. - Update(duration time.Duration) - - // UpdateSince will add the duration from the provided starting time to the - // timer's summary with the precisions that was used in creation of the timer - UpdateSince(time.Time) -} - -// LabeledTimer is a timer that must have label values populated before use. 
-type LabeledTimer interface { - WithValues(labels ...string) *labeledTimerObserver -} - -type labeledTimer struct { - m *prometheus.HistogramVec -} - -type labeledTimerObserver struct { - m prometheus.Observer -} - -func (lbo *labeledTimerObserver) Update(duration time.Duration) { - lbo.m.Observe(duration.Seconds()) -} - -func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { - lbo.m.Observe(time.Since(since).Seconds()) -} - -func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { - return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} -} - -func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { - lt.m.Describe(c) -} - -func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { - lt.m.Collect(c) -} - -type timer struct { - m prometheus.Observer -} - -func (t *timer) Update(duration time.Duration) { - t.m.Observe(duration.Seconds()) -} - -func (t *timer) UpdateSince(since time.Time) { - t.m.Observe(time.Since(since).Seconds()) -} - -func (t *timer) Describe(c chan<- *prometheus.Desc) { - c <- t.m.(prometheus.Metric).Desc() -} - -func (t *timer) Collect(c chan<- prometheus.Metric) { - // Are there any observers that don't implement Collector? It is really - // unclear what the point of the upstream change was, but we'll let this - // panic if we get an observer that doesn't implement collector. In this - // case, we should almost always see metricVec objects, so this should - // never panic. - t.m.(prometheus.Collector).Collect(c) -} diff --git a/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/docker/go-metrics/unit.go deleted file mode 100644 index c96622f9031..00000000000 --- a/vendor/github.com/docker/go-metrics/unit.go +++ /dev/null @@ -1,12 +0,0 @@ -package metrics - -// Unit represents the type or precision of a metric that is appended to -// the metrics fully qualified name -type Unit string - -const ( - Nanoseconds Unit = "nanoseconds" - Seconds Unit = "seconds" - Bytes Unit = "bytes" - Total Unit = "total" -) diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go index 85f6ab07155..c245a89513f 100644 --- a/vendor/github.com/docker/go-units/size.go +++ b/vendor/github.com/docker/go-units/size.go @@ -2,7 +2,6 @@ package units import ( "fmt" - "regexp" "strconv" "strings" ) @@ -26,16 +25,17 @@ const ( PiB = 1024 * TiB ) -type unitMap map[string]int64 +type unitMap map[byte]int64 var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) + decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} + binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} ) -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +var ( + decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +) func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { i := 0 @@ -89,20 +89,66 @@ func RAMInBytes(size string) (int64, error) { // Parses the human-readable size string into the amount it represents. 
func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { + // TODO: rewrite to use strings.Cut if there's a space + // once Go < 1.18 is deprecated. + sep := strings.LastIndexAny(sizeStr, "01234567890. ") + if sep == -1 { + // There should be at least a digit. return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } + var num, sfx string + if sizeStr[sep] != ' ' { + num = sizeStr[:sep+1] + sfx = sizeStr[sep+1:] + } else { + // Omit the space separator. + num = sizeStr[:sep] + sfx = sizeStr[sep+1:] + } - size, err := strconv.ParseFloat(matches[1], 64) + size, err := strconv.ParseFloat(num, 64) if err != nil { return -1, err } + // Backward compatibility: reject negative sizes. + if size < 0 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + if len(sfx) == 0 { + return int64(size), nil + } - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { + // Process the suffix. + + if len(sfx) > 3 { // Too long. + goto badSuffix + } + sfx = strings.ToLower(sfx) + // Trivial case: b suffix. + if sfx[0] == 'b' { + if len(sfx) > 1 { // no extra characters allowed after b. + goto badSuffix + } + return int64(size), nil + } + // A suffix from the map. + if mul, ok := uMap[sfx[0]]; ok { size *= float64(mul) + } else { + goto badSuffix + } + + // The suffix may have extra "b" or "ib" (e.g. KiB or MB). + switch { + case len(sfx) == 2 && sfx[1] != 'b': + goto badSuffix + case len(sfx) == 3 && sfx[1:] != "ib": + goto badSuffix } return int64(size), nil + +badSuffix: + return -1, fmt.Errorf("invalid suffix: '%s'", sfx) } diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 4cd0cbaf432..1d89d85ce4f 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -1,6 +1,6 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global +# go test -c output +*.test +*.test.exe -.vagrant -*.sublime-project +# Output of go build ./cmd/fsnotify +/fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 6cbabe5ef50..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,62 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS - -# Please keep the list sorted. 
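Tying back to the go-units `size.go` rewrite above: a minimal sketch of the behaviour the new suffix parsing implements, using the package's existing entry points (`RAMInBytes` resolves binary units, `FromHumanSize` decimal ones). The expected values follow from the KB/KiB constants in `size.go`, and the final error comes from the new `badSuffix` path:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// Binary map: 64 * 1024 * 1024; a space before the suffix is still accepted.
	v, err := units.RAMInBytes("64 MiB")
	fmt.Println(v, err) // 67108864 <nil>

	// Decimal map: 2 * 1000.
	d, err := units.FromHumanSize("2kB")
	fmt.Println(d, err) // 2000 <nil>

	// Unrecognized suffixes now surface as "invalid suffix" rather than "invalid size".
	_, err = units.RAMInBytes("12xb")
	fmt.Println(err)
}
```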
- -Aaron L -Adrien Bustany -Alexey Kazakov -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Brian Goff -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Eric Lin -Evan Phoenix -Francisco Souza -Gautam Dey -Hari haran -Ichinose Shogo -Johannes Ebke -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Matthias Stone -Nathan Youngman -Nickolai Zeldovich -Oliver Bristow -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pratik Shinde -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tobias Klauser -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index cc01c08f56d..77f9593bd58 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -7,6 +7,95 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +Nothing yet. + +## [1.6.0] - 2022-10-13 + +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. + +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. 
+ +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 + ## [1.5.4] - 2022-04-25 * Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) @@ -40,6 +129,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [#385](https://github.com/fsnotify/fsnotify/pull/385) * Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. + +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remove appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + ## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index 8a642563d71..ea379759d51 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,60 +1,26 @@ -# Contributing +Thank you for your interest in contributing to fsnotify! We try to review and +merge PRs in a reasonable timeframe, but please be aware that: -## Issues +- To avoid "wasted" work, please discuss changes on the issue tracker first. You + can just send PRs, but they may end up being rejected for one reason or the + other. -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated.
+- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms. -## Pull Requests +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. -### Contributor License Agreement +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). +Use the `-short` flag to make the "stress test" run faster. -Please indicate that you have signed the CLA in your pull request. -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index e180c8fb059..fb03ade7506 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,28 +1,25 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. +Copyright © 2012 The Go Authors. 
All rights reserved. +Copyright © fsnotify Authors. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index 0731c5ef8ad..d4e6080feb2 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,120 +1,161 @@ -# File system notifications for Go +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, and BSD systems. 
-[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413) +Go 1.16 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify -fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library. +**It's best to read the documentation at pkg.go.dev, as it's pinned to the last +released version, whereas this README is for the last development version which +may include additions/changes.** -Cross platform: Windows, Linux, BSD and macOS. +--- -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported | -| kqueue | BSD, macOS, iOS\* | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | -| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | +Platform support: -\* Android and iOS are untested. +| Adapter | OS | Status | +| --------------------- | ---------------| -------------------------------------------------------------| +| inotify | Linux 2.6.32+ | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | -Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. +Linux and macOS should include Android and iOS, but these are currently untested. -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). 
- -## Usage +Usage +----- +A basic example: ```go package main import ( - "log" + "log" - "github.com/fsnotify/fsnotify" + "github.com/fsnotify/fsnotify" ) func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // Start listening for events. + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + // Add a path. + err = watcher.Add("/tmp") + if err != nil { + log.Fatal(err) + } + + // Block main goroutine forever. + <-make(chan struct{}) } ``` -## Contributing +Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be +run with: -Please refer to [CONTRIBUTING][] before opening an issue or pull request. + % go run ./cmd/fsnotify -## FAQ +FAQ +--- +### Will a file still be watched when it's moved to another directory? +No, not unless you are watching the location it was moved to. -**When a file is moved to another directory is it still being watched?** +### Are subdirectories watched too? +No, you must add watches for any directory you want to watch (a recursive +watcher is on the roadmap: [#18]). -No (it shouldn't be, unless you are watching where it was moved to). +[#18]: https://github.com/fsnotify/fsnotify/issues/18 -**When I watch a directory, are all subdirectories watched as well?** +### Do I have to watch the Error and Event channels in a goroutine? +As of now, yes (you can read both channels in the same goroutine using `select`, +you don't need a separate goroutine for both channels; see the example). -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). +### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? +fsnotify requires support from underlying OS to work. The current NFS and SMB +protocols does not provide network level support for file notifications, and +neither do the /proc and /sys virtual filesystems. -**Do I have to watch the Error and Event channels in a separate goroutine?** +This could be fixed with a polling watcher ([#9]), but it's not yet implemented. -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) +[#9]: https://github.com/fsnotify/fsnotify/issues/9 -**Why am I receiving multiple events for the same file on OS X?** +Platform-specific notes +----------------------- +### Linux +When a file is removed a REMOVE event won't be emitted until all file +descriptors are closed; it will emit a CHMOD instead: -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). 
+ fp := os.Open("file") + os.Remove("file") // CHMOD + fp.Close() // REMOVE -**How many files can be watched at once?** +This is the event that inotify sends, so not much can be changed about this. -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. +The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for +the number of watches per user, and `fs.inotify.max_user_instances` specifies +the maximum number of inotify instances per user. Every Watcher you create is an +"instance", and every path you add is a "watch". -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** +These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and +`/proc/sys/fs/inotify/max_user_instances` -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. +To increase them you can use `sysctl` or write the value to proc file: -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 + # The default values on Linux 5.18 + sysctl fs.inotify.max_user_watches=124983 + sysctl fs.inotify.max_user_instances=128 + +To make the changes persist on reboot edit `/etc/sysctl.conf` or +`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your +distro's documentation): + + fs.inotify.max_user_watches=124983 + fs.inotify.max_user_instances=128 -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md +Reaching the limit will result in a "no space left on device" or "too many open +files" error. -## Related Projects +### kqueue (macOS, all BSD systems) +kqueue requires opening a file descriptor for every file that's being watched; +so if you're watching a directory with five files then that's six file +descriptors. You will run in to your system's "max open files" limit faster on +these platforms. -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) +The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to +control the maximum number of open files. +### macOS +Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary +workaround is to add your folder(s) to the *Spotlight Privacy settings* until we +have a native FSEvents implementation (see [#11]). + +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go new file mode 100644 index 00000000000..1a95ad8e7ce --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -0,0 +1,162 @@ +//go:build solaris +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). 
+// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). 
+ // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 00000000000..54c77fbb0ee --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,459 @@ +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. 
+// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. 
+ Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). See: https://github.com/golang/go/issues/26439 + fd int + mu sync.Mutex // Map access + inotifyFile *os.File + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + // Need to set the FD to nonblocking mode in order for SetDeadline methods to work + // Otherwise, blocking i/o operations won't terminate on close + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &Watcher{ + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed() { + w.mu.Unlock() + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + w.mu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. + err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. 
+// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case; + // The only two possible errors are: + // + // - EBADF, which happens when w.fd is not a valid file descriptor + // of any kind. + // - EINVAL, which is when fd is not an inotify descriptor or wd + // is not a valid watch descriptor. Watch descriptors are + // invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they + // are watching is deleted. + return errno + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). 
+func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + defer func() { + close(w.doneResp) + close(w.Errors) + close(w.Events) + }() + + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + errno error // Syscall errno + ) + for { + // See if we have been closed. + if w.isClosed() { + return + } + + n, err := w.inotifyFile.Read(buf[:]) + switch { + case errors.Unwrap(err) == os.ErrClosed: + return + case err != nil: + if !w.sendError(err) { + return + } + continue + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + if !w.sendError(err) { + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + var ( + // Point "raw" to the event in the buffer + raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + mask = uint32(raw.Mask) + nameLen = uint32(raw.Len) + ) + + if mask&unix.IN_Q_OVERFLOW != 0 { + if !w.sendError(ErrEventOverflow) { + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := w.newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if mask&unix.IN_IGNORED == 0 { + if !w.sendEvent(event) { + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
+func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 00000000000..29087469bf8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,707 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). 
+// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + done chan struct{} + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing. + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Watched file descriptors (key: path). + watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). + userWatches map[string]struct{} // Watches added with Watcher.Add() + dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. + paths map[int]pathInfo // File descriptors to path names for processing kqueue events. + fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + closepipe: closepipe, + watches: make(map[string]int), + watchesByDir: make(map[string]map[int]struct{}), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]struct{}), + userWatches: make(map[string]struct{}), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. 
+func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. + err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + pathsToRemove := make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() // Unlock before calling Remove, which also locks + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + close(w.done) + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.userWatches[name] = struct{}{} + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. 
For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.userWatches, name) + + parentName := filepath.Dir(name) + delete(w.watchesByDir[parentName], watchfd) + + if len(w.watchesByDir[parentName]) == 0 { + delete(w.watchesByDir, parentName) + } + + delete(w.paths, watchfd) + delete(w.dirFlags, name) + delete(w.fileExists, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for fd := range w.watchesByDir[name] { + path := w.paths[fd] + if _, ok := w.userWatches[path.name]; !ok { + pathsToRemove = append(pathsToRemove, path.name) + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.userWatches)) + for pathname := range w.userWatches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow Symlinks + // + // Linux can add unresolvable symlinks to the watch list without issue, + // and Windows can't do symlinks period. To maintain consistency, we + // will act like everything is fine if the link can't be resolved. + // There will simply be no file events for broken symlinks. Hence the + // returns of nil on errors. 
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and go issues 11180 and 39237. + for { + watchfd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + isDir = fi.IsDir() + } + + err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + parentName := filepath.Dir(name) + w.watches[name] = watchfd + + watchesByDir, ok := w.watchesByDir[parentName] + if !ok { + watchesByDir = make(map[int]struct{}, 1) + w.watchesByDir[parentName] = watchesByDir + } + watchesByDir[watchfd] = struct{}{} + + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + defer func() { + err := unix.Close(w.kq) + if err != nil { + w.Errors <- err + } + unix.Close(w.closepipe[0]) + close(w.Events) + close(w.Errors) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for closed := false; !closed; { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + closed = true + } + continue + } + + // Flush the events we received to the Events channel + for _, kevent := range kevents { + var ( + watchfd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if watchfd == w.closepipe[0] { + closed = true + continue + } + + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + + event := w.newEvent(path.name, mask) + + if path.isDir && !event.Has(Remove) { + // Double check to make sure the directory exists. This can + // happen when we do a rm -fr on a recursively watched folders + // and we receive a modification event first but the folder has + // been deleted and later receive the delete event. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + event.Op |= Remove + } + } + + if event.Has(Rename) || event.Has(Remove) { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Has(Write) && !event.Has(Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + if !w.sendEvent(event) { + closed = true + continue + } + } + + if event.Has(Remove) { + // Look for a file that may have overwritten this. 
+ // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + } + } +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + path := filepath.Join(dirPath, fileInfo.Name()) + + cleanPath, err := w.internalWatch(path, fileInfo) + if err != nil { + // No permission to read the file; that's not a problem: just skip. + // But do add it to w.fileExists to prevent it from being picked up + // as a "new" file later (it still shows up in the directory + // listing). + switch { + case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): + cleanPath = filepath.Clean(path) + default: + return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) + } + } + + w.mu.Lock() + w.fileExists[cleanPath] = struct{}{} + w.mu.Unlock() + } + + return nil +} + +// Search the directory for new files and send an event for them. +// +// This functionality is to have the BSD watcher match the inotify, which sends +// a create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dir string) { + // Get all files + files, err := ioutil.ReadDir(dir) + if err != nil { + if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { + return + } + } + + // Search for new files + for _, fi := range files { + err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + if !w.sendEvent(Event{Name: filePath, Op: Create}) { + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = struct{}{} + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *Watcher) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 00000000000..a9bb1c3c4d0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,66 @@ +//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +package fsnotify + +import ( + "fmt" + "runtime" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct{} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). 
+// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 00000000000..ae392867c04 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,746 @@ +//go:build windows +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. 
+// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, isClosed + watches watchMap // Map of watches (key: i-number) + isClosed bool // Set to true when Close() is first called +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.quit: + } + return false +} + +// Close removes all watches and closes the events channel. 
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return nil
+	}
+	w.isClosed = true
+	w.mu.Unlock()
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; attempting to watch it more than once will
+// return an error. Paths that do not yet exist on the filesystem cannot be
+// added. A watch will be automatically removed if the path is deleted.
+//
+// A path will remain watched if it gets renamed to somewhere else on the same
+// filesystem, but the monitor will get removed if the path gets deleted and
+// re-created, or if it's moved to a different filesystem.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many tools update files atomically. Instead of "just" writing
+// to the file, a temporary file will be written to first, and if successful the
+// temporary file is moved to the destination, removing the original, or some
+// variant thereof. The watcher on the original file is now lost, as it no
+// longer exists.
+//
+// Instead, watch the parent directory and use Event.Name to filter out files
+// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
+func (w *Watcher) Add(name string) error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return errors.New("watcher already closed")
+	}
+	w.mu.Unlock()
+
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// WatchList returns all paths added with [Add] (and not yet removed).
+func (w *Watcher) WatchList() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	entries := make([]string, 0, len(w.watches))
+	for _, entry := range w.watches {
+		for _, watchEntry := range entry {
+			entries = append(entries, watchEntry.path)
+		}
+	}
+
+	return entries
+}
+
+// These options are from the old golang.org/x/exp/winfsnotify, where you could
+// add various options to the watch. This has long since been removed.
+//
+// The "sys" in the name is misleading as they're not part of any "system".
+// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [65536]byte // 64K buffer +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *Watcher) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *Watcher) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *Watcher) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
+func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + ino, err := w.getIno(dir) + if err != nil { + return err + } + + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + + err = windows.CloseHandle(ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + if watch == nil { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *Watcher) startRead(watch *watch) error { + err := windows.CancelIo(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CancelIo", err)) + w.deleteWatch(watch) + } + mask := w.toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= w.toWindowsFlags(m) + } + if mask == 0 { + err := windows.CloseHandle(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if rdErr != nil { + err := os.NewSyscallError("ReadDirectoryChanges", rdErr) + if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. 
+func (w *Watcher) readEvents() {
+	var (
+		n   uint32
+		key uintptr
+		ov  *windows.Overlapped
+	)
+	runtime.LockOSThread()
+
+	for {
+		qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
+		// This error is handled after the watch == nil check below. NOTE: this
+		// seems odd, not sure if it's correct.
+
+		watch := (*watch)(unsafe.Pointer(ov))
+		if watch == nil {
+			select {
+			case ch := <-w.quit:
+				w.mu.Lock()
+				var indexes []indexMap
+				for _, index := range w.watches {
+					indexes = append(indexes, index)
+				}
+				w.mu.Unlock()
+				for _, index := range indexes {
+					for _, watch := range index {
+						w.deleteWatch(watch)
+						w.startRead(watch)
+					}
+				}
+
+				err := windows.CloseHandle(w.port)
+				if err != nil {
+					err = os.NewSyscallError("CloseHandle", err)
+				}
+				close(w.Events)
+				close(w.Errors)
+				ch <- err
+				return
+			case in := <-w.input:
+				switch in.op {
+				case opAddWatch:
+					in.reply <- w.addWatch(in.path, uint64(in.flags))
+				case opRemoveWatch:
+					in.reply <- w.remWatch(in.path)
+				}
+			default:
+			}
+			continue
+		}
+
+		switch qErr {
+		case windows.ERROR_MORE_DATA:
+			if watch == nil {
+				w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
+			} else {
+				// The i/o succeeded but the buffer is full.
+				// In theory we should be building up a full packet.
+				// In practice we can get away with just carrying on.
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case windows.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case windows.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.sendError(errors.New("short read in readEvents()"))
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+
+			// Create a buf that is the size of the path name
+			size := int(raw.FileNameLength / 2)
+			var buf []uint16
+			// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
+			sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+			sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
+			sh.Len = size
+			sh.Cap = size
+			name := windows.UTF16ToString(buf)
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case windows.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case windows.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case windows.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case windows.FILE_ACTION_RENAMED_NEW_NAME:
+				// Update saved path of all sub-watches.
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + w.sendEvent(fullname, watch.names[name]&mask) + } + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + w.sendError(errors.New( + "Windows system assumed buffer larger than it is, events have likely been missed.")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *Watcher) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index b3ac3d8f55f..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build solaris -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error {
-	return nil
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index 0f4ee52e8aa..30a5bf0f07a 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -1,29 +1,37 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
 //go:build !plan9
 // +build !plan9
 
-// Package fsnotify provides a platform-independent interface for file system notifications.
+// Package fsnotify provides a cross-platform interface for file system
+// notifications.
 package fsnotify
 
 import (
-	"bytes"
 	"errors"
 	"fmt"
+	"strings"
 )
 
-// Event represents a single file system notification.
+// Event represents a file system notification.
 type Event struct {
-	Name string // Relative path to the file or directory.
-	Op   Op     // File operation that triggered the event.
+	// Path to the file or directory.
+	//
+	// Paths are relative to the input; for example with Add("dir") the Name
+	// will be set to "dir/file" if you create that file, but if you use
+	// Add("/path/to/dir") it will be "/path/to/dir/file".
+	Name string
+
+	// File operation that triggered the event.
+	//
+	// This is a bitmask and some systems may send multiple operations at once.
+	// Use the Event.Has() method instead of comparing with ==.
+	Op Op
 }
 
 // Op describes a set of file operations.
 type Op uint32
 
-// These are the generalized file operations that can trigger a notification.
+// The operations fsnotify can trigger; see the documentation on [Watcher] for a
+// full description, and check them with [Event.Has].
 const (
 	Create Op = 1 << iota
 	Write
@@ -32,38 +40,42 @@ const (
 	Chmod
 )
 
-func (op Op) String() string {
-	// Use a buffer for efficient string concatenation
-	var buffer bytes.Buffer
+// Common errors that can be reported by a watcher
+var (
+	ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
+	ErrEventOverflow    = errors.New("fsnotify queue overflow")
+)
 
-	if op&Create == Create {
-		buffer.WriteString("|CREATE")
+func (op Op) String() string {
+	var b strings.Builder
+	if op.Has(Create) {
+		b.WriteString("|CREATE")
 	}
-	if op&Remove == Remove {
-		buffer.WriteString("|REMOVE")
+	if op.Has(Remove) {
+		b.WriteString("|REMOVE")
 	}
-	if op&Write == Write {
-		buffer.WriteString("|WRITE")
+	if op.Has(Write) {
+		b.WriteString("|WRITE")
 	}
-	if op&Rename == Rename {
-		buffer.WriteString("|RENAME")
+	if op.Has(Rename) {
+		b.WriteString("|RENAME")
 	}
-	if op&Chmod == Chmod {
-		buffer.WriteString("|CHMOD")
+	if op.Has(Chmod) {
+		b.WriteString("|CHMOD")
 	}
-	if buffer.Len() == 0 {
-		return ""
+	if b.Len() == 0 {
+		return "[no events]"
 	}
-	return buffer.String()[1:] // Strip leading pipe
+	return b.String()[1:]
 }
 
-// String returns a string representation of the event in the form
-// "file: REMOVE|WRITE|..."
+// Has reports if this operation has the given operation.
+func (o Op) Has(h Op) bool { return o&h == h }
+
+// Has reports if this event has the given operation.
+func (e Event) Has(op Op) bool { return e.Op.Has(op) }
+
+// String returns a string representation of the event with its path.
func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) + return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } - -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") -) diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go deleted file mode 100644 index 59688559836..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -package fsnotify - -import ( - "fmt" - "runtime" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct{} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index a6d0e0ec8c1..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. 
-func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b572a37c3f1..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 6fb8d8532e7..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh new file mode 100644 index 00000000000..b09ef768340 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -0,0 +1,208 @@ +#!/usr/bin/env zsh +[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 +setopt err_exit no_unset pipefail extended_glob + +# Simple script to update the godoc comments on all watchers. 
Probably took me +# more time to write this than doing it manually, but ah well 🙃 + +watcher=$(</tmp/x + print -r -- $cmt >>/tmp/x + tail -n+$(( end + 1 )) $file >>/tmp/x + mv /tmp/x $file + done +} + +set-cmt '^type Watcher struct ' $watcher +set-cmt '^func NewWatcher(' $new +set-cmt '^func (w \*Watcher) Add(' $add +set-cmt '^func (w \*Watcher) Remove(' $remove +set-cmt '^func (w \*Watcher) Close(' $close +set-cmt '^func (w \*Watcher) WatchList(' $watchlist +set-cmt '^[[:space:]]*Events *chan Event$' $events +set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go similarity index 57% rename from vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go rename to vendor/github.com/fsnotify/fsnotify/system_bsd.go index 36cc3845b6e..4322b0b8855 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go similarity index 52% rename from vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go rename to vendor/github.com/fsnotify/fsnotify/system_darwin.go index 98cd8476ffb..5da5ffa78fe 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build darwin // +build darwin diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 02ce7deb0bb..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = 
filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
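Aside: the recurring "Must run within the I/O thread" comments in this deleted file mark its core design. A single goroutine owns the completion port and the watch map, and Add/Remove requests are marshalled to it as messages carrying reply channels. Below is a rough, platform-neutral sketch of that confinement pattern; the request and loop names are hypothetical, and none of this is fsnotify code.

```go
package main

import (
	"fmt"
	"runtime"
)

// request is a hypothetical stand-in for the deleted *input struct:
// an operation plus a reply channel for the caller to block on.
type request struct {
	op    string
	path  string
	reply chan error
}

// loop is the only goroutine allowed to touch the watch map, mirroring
// the confinement the "Must run within the I/O thread" comments enforce.
func loop(in <-chan request, quit <-chan chan error) {
	runtime.LockOSThread() // the real code pins the OS thread for overlapped I/O
	watches := map[string]bool{}
	for {
		select {
		case req := <-in:
			switch req.op {
			case "add":
				watches[req.path] = true
				req.reply <- nil
			case "remove":
				if !watches[req.path] {
					req.reply <- fmt.Errorf("can't remove non-existent watch for: %s", req.path)
					continue
				}
				delete(watches, req.path)
				req.reply <- nil
			}
		case ch := <-quit:
			ch <- nil // all cleanup happens on this goroutine, then we exit
			return
		}
	}
}

func main() {
	in := make(chan request)
	quit := make(chan chan error)
	go loop(in, quit)

	reply := make(chan error)
	in <- request{op: "add", path: "/tmp", reply: reply}
	fmt.Println("add:", <-reply)

	done := make(chan error)
	quit <- done
	fmt.Println("close:", <-done)
}
```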
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - // TODO: Consider using unsafe.Slice that is available from go1.17 - // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang - // instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := syscall.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml new file mode 100644 index 00000000000..841c4281e23 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.codecov.yml @@ -0,0 +1,5 @@ +coverage: + status: + patch: + default: + target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitattributes b/vendor/github.com/go-openapi/analysis/.gitattributes new file mode 100644 index 00000000000..d020be8ea4e --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore new file mode 100644 index 00000000000..87c3bd3e66e --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +coverage.txt +*.cov +.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml new file mode 100644 index 00000000000..e24a6c14e6b --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -0,0 +1,56 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 40 + gocognit: + min-complexity: 40 + maligned: + suggest-new: true + dupl: + threshold: 150 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - gochecknoinits + # scopelint is useful, but also reports false positives + # that unfortunately can't be disabled. So we disable the + # linter rather than changing code that works. 
+ # see: https://github.com/kyoh86/scopelint/issues/4 + - scopelint + - godox + - gocognit + #- whitespace + - wsl + - funlen + - testpackage + - wrapcheck + #- nlreturn + - gomnd + - goerr113 + - exhaustivestruct + #- errorlint + #- nestif + - gofumpt + - godot + - gci + - dogsled + - paralleltest + - tparallel + - thelper + - ifshort + - forbidigo + - cyclop + - varnamelen + - exhaustruct + - nonamedreturns + - nosnakecase diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. 
+ +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md new file mode 100644 index 00000000000..aad6da10fe7 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/README.md @@ -0,0 +1,31 @@ +# OpenAPI initiative analysis + +[![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) +[![Build status](https://ci.appveyor.com/api/projects/status/x377t5o9ennm847o/branch/master?svg=true)](https://ci.appveyor.com/project/casualjim/go-openapi/analysis/branch/master) +[![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) + + +A foundational library to analyze an OAI specification document for easier reasoning about the content. + +## What's inside? + +* A analyzer providing methods to walk the functional content of a specification +* A spec flattener producing a self-contained document bundle, while preserving `$ref`s +* A spec merger ("mixin") to merge several spec documents into a primary spec +* A spec "fixer" ensuring that response descriptions are non empty + +[Documentation](https://godoc.org/github.com/go-openapi/analysis) + +## FAQ + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. 
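Aside: to make this newly vendored package concrete, a minimal sketch of the analyzer the README describes. It assumes a Swagger 2.0 document already on disk; loads.Spec comes from go-openapi/loads, which this change also vendors, and "swagger.json" is a placeholder path, not a file in this repository.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/loads"
)

func main() {
	// Parse a Swagger 2.0 document from disk (placeholder path).
	doc, err := loads.Spec("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	// New builds the registry of indices (operations, refs, patterns,
	// enums) over the parsed *spec.Swagger.
	an := analysis.New(doc.Spec())

	// Query a few of the indices exposed by the analyzed spec.
	fmt.Println("operations:", an.OperationIDs())
	fmt.Println("consumes:", an.RequiredConsumes())
	fmt.Println("unique refs:", len(an.AllRefs()))
}
```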
+> diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go new file mode 100644 index 00000000000..c17aee1b617 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/analyzer.go @@ -0,0 +1,1064 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + slashpath "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +type referenceAnalysis struct { + schemas map[string]spec.Ref + responses map[string]spec.Ref + parameters map[string]spec.Ref + items map[string]spec.Ref + headerItems map[string]spec.Ref + parameterItems map[string]spec.Ref + allRefs map[string]spec.Ref + pathItems map[string]spec.Ref +} + +func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { + r.allRefs["#"+key] = ref +} + +func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { + r.items["#"+key] = items.Ref + r.addRef(key, items.Ref) + if location == "header" { + // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas + // and $ref are not supported here. However it is possible to analyze this. 
+ r.headerItems["#"+key] = items.Ref + } else { + r.parameterItems["#"+key] = items.Ref + } +} + +func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { + r.schemas["#"+key] = ref.Schema.Ref + r.addRef(key, ref.Schema.Ref) +} + +func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { + r.responses["#"+key] = resp.Ref + r.addRef(key, resp.Ref) +} + +func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { + r.parameters["#"+key] = param.Ref + r.addRef(key, param.Ref) +} + +func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { + r.pathItems["#"+key] = pathItem.Ref + r.addRef(key, pathItem.Ref) +} + +type patternAnalysis struct { + parameters map[string]string + headers map[string]string + items map[string]string + schemas map[string]string + allPatterns map[string]string +} + +func (p *patternAnalysis) addPattern(key, pattern string) { + p.allPatterns["#"+key] = pattern +} + +func (p *patternAnalysis) addParameterPattern(key, pattern string) { + p.parameters["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addHeaderPattern(key, pattern string) { + p.headers["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addItemsPattern(key, pattern string) { + p.items["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addSchemaPattern(key, pattern string) { + p.schemas["#"+key] = pattern + p.addPattern(key, pattern) +} + +type enumAnalysis struct { + parameters map[string][]interface{} + headers map[string][]interface{} + items map[string][]interface{} + schemas map[string][]interface{} + allEnums map[string][]interface{} +} + +func (p *enumAnalysis) addEnum(key string, enum []interface{}) { + p.allEnums["#"+key] = enum +} + +func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { + p.parameters["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { + p.headers["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { + p.items["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { + p.schemas["#"+key] = enum + p.addEnum(key, enum) +} + +// New takes a swagger spec object and returns an analyzed spec document. +// The analyzed document contains a number of indices that make it easier to +// reason about semantics of a swagger specification for use in code generation +// or validation etc. +func New(doc *spec.Swagger) *Spec { + a := &Spec{ + spec: doc, + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + a.reset() + a.initialize() + + return a +} + +// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry +// with a bunch of utility methods to act on the information in the spec. 
+type Spec struct { + spec *spec.Swagger + consumes map[string]struct{} + produces map[string]struct{} + authSchemes map[string]struct{} + operations map[string]map[string]*spec.Operation + references referenceAnalysis + patterns patternAnalysis + enums enumAnalysis + allSchemas map[string]SchemaRef + allOfs map[string]SchemaRef +} + +func (s *Spec) reset() { + s.consumes = make(map[string]struct{}, 150) + s.produces = make(map[string]struct{}, 150) + s.authSchemes = make(map[string]struct{}, 150) + s.operations = make(map[string]map[string]*spec.Operation, 150) + s.allSchemas = make(map[string]SchemaRef, 150) + s.allOfs = make(map[string]SchemaRef, 150) + s.references.schemas = make(map[string]spec.Ref, 150) + s.references.pathItems = make(map[string]spec.Ref, 150) + s.references.responses = make(map[string]spec.Ref, 150) + s.references.parameters = make(map[string]spec.Ref, 150) + s.references.items = make(map[string]spec.Ref, 150) + s.references.headerItems = make(map[string]spec.Ref, 150) + s.references.parameterItems = make(map[string]spec.Ref, 150) + s.references.allRefs = make(map[string]spec.Ref, 150) + s.patterns.parameters = make(map[string]string, 150) + s.patterns.headers = make(map[string]string, 150) + s.patterns.items = make(map[string]string, 150) + s.patterns.schemas = make(map[string]string, 150) + s.patterns.allPatterns = make(map[string]string, 150) + s.enums.parameters = make(map[string][]interface{}, 150) + s.enums.headers = make(map[string][]interface{}, 150) + s.enums.items = make(map[string][]interface{}, 150) + s.enums.schemas = make(map[string][]interface{}, 150) + s.enums.allEnums = make(map[string][]interface{}, 150) +} + +func (s *Spec) reload() { + s.reset() + s.initialize() +} + +func (s *Spec) initialize() { + for _, c := range s.spec.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range s.spec.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range s.spec.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + for path, pathItem := range s.AllPaths() { + s.analyzeOperations(path, &pathItem) //#nosec + } + + for name, parameter := range s.spec.Parameters { + refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) + if parameter.Items != nil { + s.analyzeItems("items", parameter.Items, refPref, "parameter") + } + if parameter.In == "body" && parameter.Schema != nil { + s.analyzeSchema("schema", parameter.Schema, refPref) + } + if parameter.Pattern != "" { + s.patterns.addParameterPattern(refPref, parameter.Pattern) + } + if len(parameter.Enum) > 0 { + s.enums.addParameterEnum(refPref, parameter.Enum) + } + } + + for name, response := range s.spec.Responses { + refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) + for k, v := range response.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + if v.Items != nil { + s.analyzeItems("items", v.Items, hRefPref, "header") + } + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + if response.Schema != nil { + s.analyzeSchema("schema", response.Schema, refPref) + } + } + + for name := range s.spec.Definitions { + schema := s.spec.Definitions[name] + s.analyzeSchema(name, &schema, "/definitions") + } + // TODO: after analyzing all things and flattening schemas etc + // resolve all the collected references to their final representations + // best put in a separate method because this could get expensive +} + +func (s *Spec) analyzeOperations(path 
string, pi *spec.PathItem) { + // TODO: resolve refs here? + // Currently, operations declared via pathItem $ref are known only after expansion + op := pi + if pi.Ref.String() != "" { + key := slashpath.Join("/paths", jsonpointer.Escape(path)) + s.references.addPathItemRef(key, pi) + } + s.analyzeOperation("GET", path, op.Get) + s.analyzeOperation("PUT", path, op.Put) + s.analyzeOperation("POST", path, op.Post) + s.analyzeOperation("PATCH", path, op.Patch) + s.analyzeOperation("DELETE", path, op.Delete) + s.analyzeOperation("HEAD", path, op.Head) + s.analyzeOperation("OPTIONS", path, op.Options) + for i, param := range op.Parameters { + refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, ¶m) //#nosec + } + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + if param.Items != nil { + s.analyzeItems("items", param.Items, refPref, "parameter") + } + if param.Schema != nil { + s.analyzeSchema("schema", param.Schema, refPref) + } + } +} + +func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { + if items == nil { + return + } + refPref := slashpath.Join(prefix, name) + s.analyzeItems(name, items.Items, refPref, location) + if items.Ref.String() != "" { + s.references.addItemsRef(refPref, items, location) + } + if items.Pattern != "" { + s.patterns.addItemsPattern(refPref, items.Pattern) + } + if len(items.Enum) > 0 { + s.enums.addItemsEnum(refPref, items.Enum) + } +} + +func (s *Spec) analyzeParameter(prefix string, i int, param spec.Parameter) { + refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, ¶m) //#nosec + } + + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + + s.analyzeItems("items", param.Items, refPref, "parameter") + if param.In == "body" && param.Schema != nil { + s.analyzeSchema("schema", param.Schema, refPref) + } +} + +func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { + if op == nil { + return + } + + for _, c := range op.Consumes { + s.consumes[c] = struct{}{} + } + + for _, c := range op.Produces { + s.produces[c] = struct{}{} + } + + for _, ss := range op.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + + if _, ok := s.operations[method]; !ok { + s.operations[method] = make(map[string]*spec.Operation) + } + + s.operations[method][path] = op + prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) + for i, param := range op.Parameters { + s.analyzeParameter(prefix, i, param) + } + + if op.Responses == nil { + return + } + + if op.Responses.Default != nil { + s.analyzeDefaultResponse(prefix, op.Responses.Default) + } + + for k, res := range op.Responses.StatusCodeResponses { + s.analyzeResponse(prefix, k, res) + } +} + +func (s *Spec) analyzeDefaultResponse(prefix string, res *spec.Response) { + refPref := slashpath.Join(prefix, "responses", "default") + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, res) + } + + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + 
} + + if res.Schema != nil { + s.analyzeSchema("schema", res.Schema, refPref) + } +} + +func (s *Spec) analyzeResponse(prefix string, k int, res spec.Response) { + refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, &res) //#nosec + } + + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + + if res.Schema != nil { + s.analyzeSchema("schema", res.Schema, refPref) + } +} + +func (s *Spec) analyzeSchema(name string, schema *spec.Schema, prefix string) { + refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) + schRef := SchemaRef{ + Name: name, + Schema: schema, + Ref: spec.MustCreateRef("#" + refURI), + TopLevel: prefix == "/definitions", + } + + s.allSchemas["#"+refURI] = schRef + + if schema.Ref.String() != "" { + s.references.addSchemaRef(refURI, schRef) + } + + if schema.Pattern != "" { + s.patterns.addSchemaPattern(refURI, schema.Pattern) + } + + if len(schema.Enum) > 0 { + s.enums.addSchemaEnum(refURI, schema.Enum) + } + + for k, v := range schema.Definitions { + v := v + s.analyzeSchema(k, &v, slashpath.Join(refURI, "definitions")) + } + + for k, v := range schema.Properties { + v := v + s.analyzeSchema(k, &v, slashpath.Join(refURI, "properties")) + } + + for k, v := range schema.PatternProperties { + v := v + // NOTE: swagger 2.0 does not support PatternProperties. + // However it is possible to analyze this in a schema + s.analyzeSchema(k, &v, slashpath.Join(refURI, "patternProperties")) + } + + for i := range schema.AllOf { + v := &schema.AllOf[i] + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) + } + + if len(schema.AllOf) > 0 { + s.allOfs["#"+refURI] = schRef + } + + for i := range schema.AnyOf { + v := &schema.AnyOf[i] + // NOTE: swagger 2.0 does not support anyOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) + } + + for i := range schema.OneOf { + v := &schema.OneOf[i] + // NOTE: swagger 2.0 does not support oneOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) + } + + if schema.Not != nil { + // NOTE: swagger 2.0 does not support "not" constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema("not", schema.Not, refURI) + } + + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + s.analyzeSchema("additionalProperties", schema.AdditionalProperties.Schema, refURI) + } + + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: swagger 2.0 does not support AdditionalItems. 
+ // However it is possible to analyze this in a schema + s.analyzeSchema("additionalItems", schema.AdditionalItems.Schema, refURI) + } + + if schema.Items != nil { + if schema.Items.Schema != nil { + s.analyzeSchema("items", schema.Items.Schema, refURI) + } + + for i := range schema.Items.Schemas { + sch := &schema.Items.Schemas[i] + s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) + } + } +} + +// SecurityRequirement is a representation of a security requirement for an operation +type SecurityRequirement struct { + Name string + Scopes []string +} + +// SecurityRequirementsFor gets the security requirements for the operation +func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { + if s.spec.Security == nil && operation.Security == nil { + return nil + } + + schemes := s.spec.Security + if operation.Security != nil { + schemes = operation.Security + } + + result := [][]SecurityRequirement{} + for _, scheme := range schemes { + if len(scheme) == 0 { + // append a zero object for anonymous + result = append(result, []SecurityRequirement{{}}) + + continue + } + + var reqs []SecurityRequirement + for k, v := range scheme { + if v == nil { + v = []string{} + } + reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) + } + + result = append(result, reqs) + } + + return result +} + +// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { + result := make(map[string]spec.SecurityScheme) + + for _, v := range requirements { + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + + return result +} + +// SecurityDefinitionsFor gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { + requirements := s.SecurityRequirementsFor(operation) + if len(requirements) == 0 { + return nil + } + + result := make(map[string]spec.SecurityScheme) + for _, reqs := range requirements { + for _, v := range reqs { + if v.Name == "" { + // optional requirement + continue + } + + if _, ok := result[v.Name]; ok { + // duplicate requirement + continue + } + + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + } + + return result +} + +// ConsumesFor gets the mediatypes for the operation +func (s *Spec) ConsumesFor(operation *spec.Operation) []string { + if len(operation.Consumes) == 0 { + cons := make(map[string]struct{}, len(s.spec.Consumes)) + for _, k := range s.spec.Consumes { + cons[k] = struct{}{} + } + + return s.structMapKeys(cons) + } + + cons := make(map[string]struct{}, len(operation.Consumes)) + for _, c := range operation.Consumes { + cons[c] = struct{}{} + } + + return s.structMapKeys(cons) +} + +// ProducesFor gets the mediatypes for the operation +func (s *Spec) ProducesFor(operation *spec.Operation) []string { + if len(operation.Produces) == 0 { + prod := make(map[string]struct{}, len(s.spec.Produces)) + for _, k := range s.spec.Produces { + prod[k] = struct{}{} + } + + return s.structMapKeys(prod) + } + + prod := make(map[string]struct{}, len(operation.Produces)) + for _, c := range operation.Produces { + prod[c] = struct{}{} + } + + return s.structMapKeys(prod) +} + +func mapKeyFromParam(param *spec.Parameter) 
string { + return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) +} + +func fieldNameFromParam(param *spec.Parameter) string { + // TODO: this should be x-go-name + if nm, ok := param.Extensions.GetString("go-name"); ok { + return nm + } + + return swag.ToGoName(param.Name) +} + +// ErrorOnParamFunc is a callback function to be invoked +// whenever an error is encountered while resolving references +// on parameters. +// +// This function takes as input the spec.Parameter which triggered the +// error and the error itself. +// +// If the callback function returns false, the calling function should bail. +// +// If it returns true, the calling function should continue evaluating parameters. +// A nil ErrorOnParamFunc must be evaluated as equivalent to panic(). +type ErrorOnParamFunc func(spec.Parameter, error) bool + +func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { + for _, param := range parameters { + pr := param + if pr.Ref.String() == "" { + res[mapKeyFromParam(&pr)] = pr + + continue + } + + // resolve $ref + if callmeOnError == nil { + callmeOnError = func(_ spec.Parameter, err error) bool { + panic(err) + } + } + + obj, _, err := pr.Ref.GetPointer().Get(s.spec) + if err != nil { + if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { + continue + } + + break + } + + objAsParam, ok := obj.(spec.Parameter) + if !ok { + if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { + continue + } + + break + } + + pr = objAsParam + res[mapKeyFromParam(&pr)] = pr + } +} + +// ParametersFor the specified operation id. +// +// Assumes parameters properly resolve references if any and that +// such references actually resolve to a parameter object. +// Otherwise, panics. +func (s *Spec) ParametersFor(operationID string) []spec.Parameter { + return s.SafeParametersFor(operationID, nil) +} + +// SafeParametersFor the specified operation id. +// +// Does not assume parameters properly resolve references or that +// such references actually resolve to a parameter object. +// +// Upon error, invoke a ErrorOnParamFunc callback with the erroneous +// parameters. If the callback is set to nil, panics upon errors. +func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { + gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { + bag := make(map[string]spec.Parameter) + s.paramsAsMap(pi.Parameters, bag, callmeOnError) + s.paramsAsMap(op.Parameters, bag, callmeOnError) + + var res []spec.Parameter + for _, v := range bag { + res = append(res, v) + } + + return res + } + + for _, pi := range s.spec.Paths.Paths { + if pi.Get != nil && pi.Get.ID == operationID { + return gatherParams(&pi, pi.Get) //#nosec + } + if pi.Head != nil && pi.Head.ID == operationID { + return gatherParams(&pi, pi.Head) //#nosec + } + if pi.Options != nil && pi.Options.ID == operationID { + return gatherParams(&pi, pi.Options) //#nosec + } + if pi.Post != nil && pi.Post.ID == operationID { + return gatherParams(&pi, pi.Post) //#nosec + } + if pi.Patch != nil && pi.Patch.ID == operationID { + return gatherParams(&pi, pi.Patch) //#nosec + } + if pi.Put != nil && pi.Put.ID == operationID { + return gatherParams(&pi, pi.Put) //#nosec + } + if pi.Delete != nil && pi.Delete.ID == operationID { + return gatherParams(&pi, pi.Delete) //#nosec + } + } + + return nil +} + +// ParamsFor the specified method and path. 
Aggregates them with the defaults etc, so it's all the params that +// apply for the method and path. +// +// Assumes parameters properly resolve references if any and that +// such references actually resolve to a parameter object. +// Otherwise, panics. +func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { + return s.SafeParamsFor(method, path, nil) +} + +// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that +// apply for the method and path. +// +// Does not assume parameters properly resolve references or that +// such references actually resolve to a parameter object. +// +// Upon error, invoke a ErrorOnParamFunc callback with the erroneous +// parameters. If the callback is set to nil, panics upon errors. +func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { + res := make(map[string]spec.Parameter) + if pi, ok := s.spec.Paths.Paths[path]; ok { + s.paramsAsMap(pi.Parameters, res, callmeOnError) + s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError) + } + + return res +} + +// OperationForName gets the operation for the given id +func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { + for method, pathItem := range s.operations { + for path, op := range pathItem { + if operationID == op.ID { + return method, path, op, true + } + } + } + + return "", "", nil, false +} + +// OperationFor the given method and path +func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { + if mp, ok := s.operations[strings.ToUpper(method)]; ok { + op, fn := mp[path] + + return op, fn + } + + return nil, false +} + +// Operations gathers all the operations specified in the spec document +func (s *Spec) Operations() map[string]map[string]*spec.Operation { + return s.operations +} + +func (s *Spec) structMapKeys(mp map[string]struct{}) []string { + if len(mp) == 0 { + return nil + } + + result := make([]string, 0, len(mp)) + for k := range mp { + result = append(result, k) + } + + return result +} + +// AllPaths returns all the paths in the swagger spec +func (s *Spec) AllPaths() map[string]spec.PathItem { + if s.spec == nil || s.spec.Paths == nil { + return nil + } + + return s.spec.Paths.Paths +} + +// OperationIDs gets all the operation ids based on method an dpath +func (s *Spec) OperationIDs() []string { + if len(s.operations) == 0 { + return nil + } + + result := make([]string, 0, len(s.operations)) + for method, v := range s.operations { + for p, o := range v { + if o.ID != "" { + result = append(result, o.ID) + } else { + result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) + } + } + } + + return result +} + +// OperationMethodPaths gets all the operation ids based on method an dpath +func (s *Spec) OperationMethodPaths() []string { + if len(s.operations) == 0 { + return nil + } + + result := make([]string, 0, len(s.operations)) + for method, v := range s.operations { + for p := range v { + result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) + } + } + + return result +} + +// RequiredConsumes gets all the distinct consumes that are specified in the specification document +func (s *Spec) RequiredConsumes() []string { + return s.structMapKeys(s.consumes) +} + +// RequiredProduces gets all the distinct produces that are specified in the specification document +func (s *Spec) RequiredProduces() []string { + return 
s.structMapKeys(s.produces) +} + +// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec +func (s *Spec) RequiredSecuritySchemes() []string { + return s.structMapKeys(s.authSchemes) +} + +// SchemaRef is a reference to a schema +type SchemaRef struct { + Name string + Ref spec.Ref + Schema *spec.Schema + TopLevel bool +} + +// SchemasWithAllOf returns schema references to all schemas that are defined +// with an allOf key +func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { + for _, v := range s.allOfs { + result = append(result, v) + } + + return +} + +// AllDefinitions returns schema references for all the definitions that were discovered +func (s *Spec) AllDefinitions() (result []SchemaRef) { + for _, v := range s.allSchemas { + result = append(result, v) + } + + return +} + +// AllDefinitionReferences returns json refs for all the discovered schemas +func (s *Spec) AllDefinitionReferences() (result []string) { + for _, v := range s.references.schemas { + result = append(result, v.String()) + } + + return +} + +// AllParameterReferences returns json refs for all the discovered parameters +func (s *Spec) AllParameterReferences() (result []string) { + for _, v := range s.references.parameters { + result = append(result, v.String()) + } + + return +} + +// AllResponseReferences returns json refs for all the discovered responses +func (s *Spec) AllResponseReferences() (result []string) { + for _, v := range s.references.responses { + result = append(result, v.String()) + } + + return +} + +// AllPathItemReferences returns the references for all the items +func (s *Spec) AllPathItemReferences() (result []string) { + for _, v := range s.references.pathItems { + result = append(result, v.String()) + } + + return +} + +// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers). +// +// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid +// Swagger 2.0 spec. 
+func (s *Spec) AllItemsReferences() (result []string) { + for _, v := range s.references.items { + result = append(result, v.String()) + } + + return +} + +// AllReferences returns all the references found in the document, with possible duplicates +func (s *Spec) AllReferences() (result []string) { + for _, v := range s.references.allRefs { + result = append(result, v.String()) + } + + return +} + +// AllRefs returns all the unique references found in the document +func (s *Spec) AllRefs() (result []spec.Ref) { + set := make(map[string]struct{}) + for _, v := range s.references.allRefs { + a := v.String() + if a == "" { + continue + } + + if _, ok := set[a]; !ok { + set[a] = struct{}{} + result = append(result, v) + } + } + + return +} + +func cloneStringMap(source map[string]string) map[string]string { + res := make(map[string]string, len(source)) + for k, v := range source { + res[k] = v + } + + return res +} + +func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { + res := make(map[string][]interface{}, len(source)) + for k, v := range source { + res[k] = v + } + + return res +} + +// ParameterPatterns returns all the patterns found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterPatterns() map[string]string { + return cloneStringMap(s.patterns.parameters) +} + +// HeaderPatterns returns all the patterns found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderPatterns() map[string]string { + return cloneStringMap(s.patterns.headers) +} + +// ItemsPatterns returns all the patterns found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsPatterns() map[string]string { + return cloneStringMap(s.patterns.items) +} + +// SchemaPatterns returns all the patterns found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaPatterns() map[string]string { + return cloneStringMap(s.patterns.schemas) +} + +// AllPatterns returns all the patterns found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllPatterns() map[string]string { + return cloneStringMap(s.patterns.allPatterns) +} + +// ParameterEnums returns all the enums found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.parameters) +} + +// HeaderEnums returns all the enums found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.headers) +} + +// ItemsEnums returns all the enums found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.items) +} + +// SchemaEnums returns all the enums found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.schemas) +} + +// AllEnums returns all the enums found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.allEnums) +} diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/vendor/github.com/go-openapi/analysis/appveyor.yml new file mode 100644 index 00000000000..c2f6fd733a9 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/appveyor.yml @@ -0,0 +1,32 @@ +version: "0.1.{build}" + 
+clone_folder: C:\go-openapi\analysis +shallow_clone: true # for startup speed +pull_requests: + do_not_increment_build_number: true + +#skip_tags: true +#skip_branch_with_pr: true + +# appveyor.yml +build: off + +environment: + GOPATH: c:\gopath + +stack: go 1.16 + +test_script: + - go test -v -timeout 20m ./... + +deploy: off + +notifications: + - provider: Slack + incoming_webhook: https://hooks.slack.com/services/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 + auth_token: + secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= + channel: bots + on_build_success: false + on_build_failure: true + on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go new file mode 100644 index 00000000000..33c15704ecb --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/debug.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "os" + + "github.com/go-openapi/analysis/internal/debug" +) + +var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "") diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go new file mode 100644 index 00000000000..d5294c0950b --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/doc.go @@ -0,0 +1,43 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package analysis provides methods to work with a Swagger specification document from +package go-openapi/spec. + +Analyzing a specification + +An analysed specification object (type Spec) provides methods to work with swagger definition. + +Flattening or expanding a specification + +Flattening a specification bundles all remote $ref in the main spec document. 
+Depending on flattening options, additional preprocessing may take place: + - full flattening: replacing all inline complex constructs by a named entry in #/definitions + - expand: replace all $ref's in the document by their expanded content + +Merging several specifications + +Mixin several specifications merges all Swagger constructs and warns about any conflicts found. + +Fixing a specification + +Unmarshalling a specification with Go's standard JSON unmarshalling may lead to +unwanted results on fields that are present but empty. + +Analyzing a Swagger schema + +Swagger schemas are analyzed to determine their complexity and qualify their content. +*/ +package analysis diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go new file mode 100644 index 00000000000..7c2ca084162 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixer.go @@ -0,0 +1,79 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import "github.com/go-openapi/spec" + +// FixEmptyResponseDescriptions replaces empty ("") response +// descriptions in the input with "(empty)" to ensure that the +// resulting Swagger stays valid. The problem appears to arise +// from reading in valid specs that have an explicit response +// description of "" (valid, response.description is required), but +// due to zero values being omitted upon re-serializing (omitempty) we +// lose them unless we stick some chars in there. +func FixEmptyResponseDescriptions(s *spec.Swagger) { + for k, v := range s.Responses { + FixEmptyDesc(&v) //#nosec + s.Responses[k] = v + } + + if s.Paths == nil { + return + } + + for _, v := range s.Paths.Paths { + if v.Get != nil { + FixEmptyDescs(v.Get.Responses) + } + if v.Put != nil { + FixEmptyDescs(v.Put.Responses) + } + if v.Post != nil { + FixEmptyDescs(v.Post.Responses) + } + if v.Delete != nil { + FixEmptyDescs(v.Delete.Responses) + } + if v.Options != nil { + FixEmptyDescs(v.Options.Responses) + } + if v.Head != nil { + FixEmptyDescs(v.Head.Responses) + } + if v.Patch != nil { + FixEmptyDescs(v.Patch.Responses) + } + } +} + +// FixEmptyDescs adds "(empty)" as the description for any Response in +// the given Responses object that doesn't already have one. +func FixEmptyDescs(rs *spec.Responses) { + FixEmptyDesc(rs.Default) + for k, v := range rs.StatusCodeResponses { + FixEmptyDesc(&v) //#nosec + rs.StatusCodeResponses[k] = v + } +} + +// FixEmptyDesc adds "(empty)" as the description to the given +// Response object if it doesn't already have one and isn't a +// ref. No-op on nil input.
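+// +// A minimal usage sketch (editorial illustration, not upstream code; assumes sw is a *spec.Swagger freshly unmarshalled from JSON): +// +// analysis.FixEmptyResponseDescriptions(sw) +// out, err := json.Marshal(sw) // empty descriptions now survive the omitempty round-trip +//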
+func FixEmptyDesc(rs *spec.Response) { + if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { + return + } + rs.Description = "(empty)" +} diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go new file mode 100644 index 00000000000..0576220fb3d --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten.go @@ -0,0 +1,802 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + "log" + "path" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/normalize" + "github.com/go-openapi/analysis/internal/flatten/operations" + "github.com/go-openapi/analysis/internal/flatten/replace" + "github.com/go-openapi/analysis/internal/flatten/schutils" + "github.com/go-openapi/analysis/internal/flatten/sortref" + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" +) + +const definitionsPath = "#/definitions" + +// newRef stores information about refs created during the flattening process +type newRef struct { + key string + newName string + path string + isOAIGen bool + resolved bool + schema *spec.Schema + parents []string +} + +// context stores intermediary results from flatten +type context struct { + newRefs map[string]*newRef + warnings []string + resolved map[string]string +} + +func newContext() *context { + return &context{ + newRefs: make(map[string]*newRef, 150), + warnings: make([]string, 0), + resolved: make(map[string]string, 50), + } +} + +// Flatten an analyzed spec and produce a self-contained spec bundle. +// +// There is a minimal and a full flattening mode. +// +// +// Minimally flattening a spec means: +// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left +// unscathed) +// - Importing external (http, file) references so they become internal to the document +// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers +// like "$ref": "#/definitions/myObject/allOfs/1") +// +// A minimally flattened spec thus guarantees the following properties: +// - all $refs point to a local definition (i.e. '#/definitions/...') +// - definitions are unique +// +// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they +// represent a complex schema or express commonality in the spec. +// Otherwise, they are simply expanded. +// Self-referencing JSON pointers cannot resolve to a type and trigger an error. +// +// +// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. +// +// Fully flattening a spec means: +// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. +// +// By complex, we mean every JSON object with some properties. 
+// Arrays, when they do not define a tuple, +// or empty objects with or without additionalProperties, are not considered complex and remain inline. +// +// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions +// have been created. +// +// Available flattening options: +// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched +// - Expand: expand all $ref's in the document (has no effect if Minimal is set to true) +// - Verbose: croaks about name conflicts detected +// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening +// +// NOTE: expansion removes all $ref except circular $ref, which remain in place +// +// TODO: additional options +// - PropagateNameExtensions: ensure that created entries properly follow naming rules when their parent has set an +// x-go-name extension +// - LiftAllOfs: +// - limit the flattening of allOf members when simple objects +// - merge allOf with validation only +// - merge allOf with extensions only +// - ... +// +func Flatten(opts FlattenOpts) error { + debugLog("FlattenOpts: %#v", opts) + + opts.flattenContext = newContext() + + // 1. Recursively expand responses, parameters, path items and items in simple schemas. + // + // This simplifies the spec and leaves only the $ref's in schema objects. + if err := expand(&opts); err != nil { + return err + } + + // 2. Strip the current document from absolute $ref's that are actually in the root, + // so we can recognize them as proper definitions + // + // In particular, this works around issue go-openapi/spec#76: leading absolute file in $ref is stripped + if err := normalizeRef(&opts); err != nil { + return err + } + + // 3. Optionally remove shared parameters and responses already expanded (now unused). + // + // Operation parameters (i.e. under paths) remain. + if opts.RemoveUnused { + removeUnusedShared(&opts) + } + + // 4. Import all remote references. + if err := importReferences(&opts); err != nil { + return err + } + + // 5. Full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps) + if !opts.Minimal && !opts.Expand { + if err := nameInlinedSchemas(&opts); err != nil { + return err + } + } + + // 6. Rewrite JSON pointers other than $ref to named definitions + // and attempt to resolve conflicting names whenever possible. + if err := stripPointersAndOAIGen(&opts); err != nil { + return err + } + + // 7. Strip the spec from unused definitions + if opts.RemoveUnused { + removeUnused(&opts) + } + + // 8. Issue warning notifications, if any + opts.croak() + + // TODO: simplify known schema patterns to flat objects with properties + // examples: + // - lift simple allOf object, + // - empty allOf with validation only or extensions only + // - rework allOf arrays + // - rework allOf additionalProperties + + return nil +} + +func expand(opts *FlattenOpts) error { + if err := spec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil { + return err + } + + opts.Spec.reload() // re-analyze + + return nil +} + +// normalizeRef strips the current file from any absolute file $ref.
This works around issue go-openapi/spec#76: +// leading absolute file in $ref is stripped +func normalizeRef(opts *FlattenOpts) error { + debugLog("normalizeRef") + + altered := false + for k, w := range opts.Spec.references.allRefs { + if !strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS + continue + } + + altered = true + debugLog("stripping absolute path for: %s", w.String()) + + // strip the base path from definition + if err := replace.UpdateRef(opts.Swagger(), k, + spec.MustCreateRef(path.Join(definitionsPath, path.Base(w.String())))); err != nil { + return err + } + } + + if altered { + opts.Spec.reload() // re-analyze + } + + return nil +} + +func removeUnusedShared(opts *FlattenOpts) { + opts.Swagger().Parameters = nil + opts.Swagger().Responses = nil + + opts.Spec.reload() // re-analyze +} + +func importReferences(opts *FlattenOpts) error { + var ( + imported bool + err error + ) + + for !imported && err == nil { + // iteratively import remote references until none left. + // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") + imported, err = importExternalReferences(opts) + + opts.Spec.reload() // re-analyze + } + + return err +} + +// nameInlinedSchemas replaces every complex inline construct by a named definition. +func nameInlinedSchemas(opts *FlattenOpts) error { + debugLog("nameInlinedSchemas") + + namer := &InlineSchemaNamer{ + Spec: opts.Swagger(), + Operations: operations.AllOpRefsByRef(opts.Spec, nil), + flattenContext: opts.flattenContext, + opts: opts, + } + + depthFirst := sortref.DepthFirst(opts.Spec.allSchemas) + for _, key := range depthFirst { + sch := opts.Spec.allSchemas[key] + if sch.Schema == nil || sch.Schema.Ref.String() != "" || sch.TopLevel { + continue + } + + asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if err != nil { + return fmt.Errorf("schema analysis [%s]: %w", key, err) + } + + if asch.isAnalyzedAsComplex() { // move complex schemas to definitions + if err := namer.Name(key, sch.Schema, asch); err != nil { + return err + } + } + } + + opts.Spec.reload() // re-analyze + + return nil +} + +func removeUnused(opts *FlattenOpts) { + expected := make(map[string]struct{}) + for k := range opts.Swagger().Definitions { + expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} + } + + for _, k := range opts.Spec.AllDefinitionReferences() { + delete(expected, k) + } + + for k := range expected { + debugLog("removing unused definition %s", path.Base(k)) + if opts.Verbose { + log.Printf("info: removing unused definition: %s", path.Base(k)) + } + delete(opts.Swagger().Definitions, path.Base(k)) + } + + opts.Spec.reload() // re-analyze +} + +func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error { + // rewrite ref with already resolved external ref (useful for cyclical refs): + // rewrite external refs to local ones + debugLog("resolving known ref [%s] to %s", refStr, newName) + + for _, key := range entry.Keys { + if err := replace.UpdateRef(opts.Swagger(), key, spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + } + + return nil +} + +func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) error { + var ( + isOAIGen bool + newName string + ) + + debugLog("resolving schema from remote $ref [%s]", refStr) + + sch, err := spec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) + if 
err != nil { + return fmt.Errorf("could not resolve schema: %w", err) + } + + // at this stage only $ref analysis matters + partialAnalyzer := &Spec{ + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + partialAnalyzer.reset() + partialAnalyzer.analyzeSchema("", sch, "/") + + // now rewrite those refs with rebase + for key, ref := range partialAnalyzer.references.allRefs { + if err := replace.UpdateRef(sch, key, spec.MustCreateRef(normalize.RebaseRef(entry.Ref.String(), ref.String()))); err != nil { + return fmt.Errorf("failed to rewrite ref for key %q at %s: %w", key, entry.Ref.String(), err) + } + } + + // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name + newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) + debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen) + + opts.flattenContext.resolved[refStr] = newName + + // rewrite the external refs to local ones + for _, key := range entry.Keys { + if err := replace.UpdateRef(opts.Swagger(), key, + spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + + // keep track of created refs + resolved := false + if _, ok := opts.flattenContext.newRefs[key]; ok { + resolved = opts.flattenContext.newRefs[key].resolved + } + + debugLog("keeping track of ref: %s (%s), resolved: %t", key, newName, resolved) + opts.flattenContext.newRefs[key] = &newRef{ + key: key, + newName: newName, + path: path.Join(definitionsPath, newName), + isOAIGen: isOAIGen, + resolved: resolved, + schema: sch, + } + } + + // add the resolved schema to the definitions + schutils.Save(opts.Swagger(), newName, sch) + + return nil +} + +// importExternalReferences iteratively digs remote references and imports them into the main schema. +// +// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. +// +// This returns true when no more remote references can be found. 
+func importExternalReferences(opts *FlattenOpts) (bool, error) { + debugLog("importExternalReferences") + + groupedRefs := sortref.ReverseIndex(opts.Spec.references.schemas, opts.BasePath) + sortedRefStr := make([]string, 0, len(groupedRefs)) + if opts.flattenContext == nil { + opts.flattenContext = newContext() + } + + // sort $ref resolution to ensure deterministic name conflict resolution + for refStr := range groupedRefs { + sortedRefStr = append(sortedRefStr, refStr) + } + sort.Strings(sortedRefStr) + + complete := true + + for _, refStr := range sortedRefStr { + entry := groupedRefs[refStr] + if entry.Ref.HasFragmentOnly { + continue + } + + complete = false + + newName := opts.flattenContext.resolved[refStr] + if newName != "" { + if err := importKnownRef(entry, refStr, newName, opts); err != nil { + return false, err + } + + continue + } + + // resolve schemas + if err := importNewRef(entry, refStr, opts); err != nil { + return false, err + } + } + + // maintain ref index entries + for k := range opts.flattenContext.newRefs { + r := opts.flattenContext.newRefs[k] + + // update tracking with resolved schemas + if r.schema.Ref.String() != "" { + ref := spec.MustCreateRef(r.path) + sch, err := spec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false)) + if err != nil { + return false, fmt.Errorf("could not resolve schema: %w", err) + } + + r.schema = sch + } + + if r.path == k { + continue + } + + // update tracking with renamed keys: got a cascade of refs + renamed := *r + renamed.key = r.path + opts.flattenContext.newRefs[renamed.path] = &renamed + + // indirect ref + r.newName = path.Base(k) + r.schema = spec.RefSchema(r.path) + r.path = k + r.isOAIGen = strings.Contains(k, "OAIGen") + } + + return complete, nil +} + +// stripPointersAndOAIGen removes anonymous JSON pointers from the spec and chains with the name conflicts handler. +// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible. +func stripPointersAndOAIGen(opts *FlattenOpts) error { + // name all JSON pointers to anonymous documents + if err := namePointers(opts); err != nil { + return err + } + + // remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts) + hasIntroducedPointerOrInline, ers := stripOAIGen(opts) + if ers != nil { + return ers + } + + // iterate as pointer or OAIGen resolution may introduce inline schemas or pointers + for hasIntroducedPointerOrInline { + if !opts.Minimal { + opts.Spec.reload() // re-analyze + if err := nameInlinedSchemas(opts); err != nil { + return err + } + } + + if err := namePointers(opts); err != nil { + return err + } + + // restrip and re-analyze + var err error + if hasIntroducedPointerOrInline, err = stripOAIGen(opts); err != nil { + return err + } + } + + return nil +} + +// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions. +// +// A dedupe is deemed unnecessary whenever: +// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) +// - there is a conflict with multiple parents: merge OAIGen into the first parent, then rewrite the other parents to point to +// the first parent. +// +// This function returns true whenever it re-inlined a complex schema, so the caller may choose to iterate +// pointer and name resolution again.
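+// +// For example (editorial note, hypothetical names): when two distinct remote schemas are both named "error", the second import is saved as "errorOAIGen"; if "errorOAIGen" later turns out to conflict only with its single parent, this pass merges it back into that parent and deletes the extra definition. +//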
+func stripOAIGen(opts *FlattenOpts) (bool, error) { + debugLog("stripOAIGen") + replacedWithComplex := false + + // figure out referrers of OAIGen definitions (doing this before the refs start mutating) + for _, r := range opts.flattenContext.newRefs { + updateRefParents(opts.Spec.references.allRefs, r) + } + + for k := range opts.flattenContext.newRefs { + r := opts.flattenContext.newRefs[k] + debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s", + k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String()) + + if !r.isOAIGen || len(r.parents) == 0 { + continue + } + + hasReplacedWithComplex, err := stripOAIGenForRef(opts, k, r) + if err != nil { + return replacedWithComplex, err + } + + replacedWithComplex = replacedWithComplex || hasReplacedWithComplex + } + + debugLog("replacedWithComplex: %t", replacedWithComplex) + opts.Spec.reload() // re-analyze + + return replacedWithComplex, nil +} + +// updateRefParents updates all parents of an updated $ref +func updateRefParents(allRefs map[string]spec.Ref, r *newRef) { + if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping) + return + } + for k, v := range allRefs { + if r.path != v.String() { + continue + } + + found := false + for _, p := range r.parents { + if p == k { + found = true + + break + } + } + if !found { + r.parents = append(r.parents, k) + } + } +} + +func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) { + replacedWithComplex := false + + pr := sortref.TopmostFirst(r.parents) + + // rewrite first parent schema in hierarchical then lexicographical order + debugLog("rewrite first parent %s with schema", pr[0]) + if err := replace.UpdateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil { + return false, err + } + + if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen { + // update parent in ref index entry + debugLog("update parent entry: %s", pr[0]) + pa.schema = r.schema + pa.resolved = false + replacedWithComplex = true + } + + // rewrite other parents to point to first parent + if len(pr) > 1 { + for _, p := range pr[1:] { + replacingRef := spec.MustCreateRef(pr[0]) + + // set complex when replacing ref is an anonymous jsonpointer: further processing may be required + replacedWithComplex = replacedWithComplex || path.Dir(replacingRef.String()) != definitionsPath + debugLog("rewrite parent with ref: %s", replacingRef.String()) + + // NOTE: it is possible at this stage to introduce json pointers (to non-definitions places). + // Those are stripped later on.
+ if err := replace.UpdateRef(opts.Swagger(), p, replacingRef); err != nil { + return false, err + } + + if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen { + // update parent in ref index + debugLog("update parent entry: %s", p) + pa.schema = r.schema + pa.resolved = false + replacedWithComplex = true + } + } + } + + // remove OAIGen definition + debugLog("removing definition %s", path.Base(r.path)) + delete(opts.Swagger().Definitions, path.Base(r.path)) + + // propagate changes in ref index for keys which have this one as a parent + for kk, value := range opts.flattenContext.newRefs { + if kk == k || !value.isOAIGen || value.resolved { + continue + } + + found := false + newParents := make([]string, 0, len(value.parents)) + for _, parent := range value.parents { + switch { + case parent == r.path: + found = true + parent = pr[0] + case strings.HasPrefix(parent, r.path+"/"): + found = true + parent = path.Join(pr[0], strings.TrimPrefix(parent, r.path)) + } + + newParents = append(newParents, parent) + } + + if found { + value.parents = newParents + } + } + + // mark naming conflict as resolved + debugLog("marking naming conflict resolved for key: %s", r.key) + opts.flattenContext.newRefs[r.key].isOAIGen = false + opts.flattenContext.newRefs[r.key].resolved = true + + // determine if the previous substitution did inline a complex schema + if r.schema != nil && r.schema.Ref.String() == "" { // inline schema + asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if err != nil { + return false, err + } + + debugLog("re-inlined schema: parent: %s, %t", pr[0], asch.isAnalyzedAsComplex()) + replacedWithComplex = replacedWithComplex || !(path.Dir(pr[0]) == definitionsPath) && asch.isAnalyzedAsComplex() + } + + return replacedWithComplex, nil +} + +// namePointers replaces all JSON pointers to anonymous documents with a $ref to new named definitions. +// +// This is carried out depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself. +// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used). +func namePointers(opts *FlattenOpts) error { + debugLog("name pointers") + + refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas)) + for k, ref := range opts.Spec.references.allRefs { + if path.Dir(ref.String()) == definitionsPath { + // this is a ref to a top-level definition: ok + continue + } + + result, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), ref) + if err != nil { + return fmt.Errorf("at %s, %w", k, err) + } + + replacingRef := result.Ref + sch := result.Schema + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...)
+ } + + debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String()) + refsToReplace[k] = SchemaRef{ + Name: k, // caller + Ref: replacingRef, // called + Schema: sch, + TopLevel: path.Dir(replacingRef.String()) == definitionsPath, + } + } + + depthFirst := sortref.DepthFirst(refsToReplace) + namer := &InlineSchemaNamer{ + Spec: opts.Swagger(), + Operations: operations.AllOpRefsByRef(opts.Spec, nil), + flattenContext: opts.flattenContext, + opts: opts, + } + + for _, key := range depthFirst { + v := refsToReplace[key] + // update current replacement, which may have been updated by previous changes of deeper elements + result, erd := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), v.Ref) + if erd != nil { + return fmt.Errorf("at %s, %w", key, erd) + } + + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) + } + + v.Ref = result.Ref + v.Schema = result.Schema + v.TopLevel = path.Dir(result.Ref.String()) == definitionsPath + debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String()) + + if v.TopLevel { + debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String()) + + // if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref + if err := replace.UpdateRef(opts.Swagger(), key, v.Ref); err != nil { + return err + } + + continue + } + + if err := flattenAnonPointer(key, v, refsToReplace, namer, opts); err != nil { + return err + } + } + + opts.Spec.reload() // re-analyze + + return nil +} + +func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]SchemaRef, namer *InlineSchemaNamer, opts *FlattenOpts) error { + // this is a JSON pointer to an anonymous document (internal or external): + // create a definition for this schema when: + // - it is a complex schema + // - or it is pointed by more than one $ref (i.e. expresses commonality) + // otherwise, expand the pointer (single reference to a simple type) + // + // The named definition for this follows the target's key, not the caller's + debugLog("namePointers at %s for %s", key, v.Ref.String()) + + // qualify the expanded schema + asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if ers != nil { + return fmt.Errorf("schema analysis [%s]: %w", key, ers) + } + callers := make([]string, 0, 64) + + debugLog("looking for callers") + + an := New(opts.Swagger()) + for k, w := range an.references.allRefs { + r, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), w) + if err != nil { + return fmt.Errorf("at %s, %w", key, err) + } + + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, r.Warnings...) 
+ } + + if r.Ref.String() == v.Ref.String() { + callers = append(callers, k) + } + } + + debugLog("callers for %s: %d", v.Ref.String(), len(callers)) + if len(callers) == 0 { + // has already been updated and resolved + return nil + } + + parts := sortref.KeyParts(v.Ref.String()) + debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) + + // identifying edge case when the namer did nothing because we point to a non-schema object + // no definition is created and we expand the $ref for all callers + if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { + debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) + if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { + return err + } + + // regular case: we named the $ref as a definition, and we move all callers to this new $ref + for _, caller := range callers { + if caller == key { + continue + } + + // move $ref for next to resolve + debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) + c := refsToReplace[caller] + c.Ref = v.Ref + refsToReplace[caller] = c + } + + return nil + } + + debugLog("expand JSON pointer for key=%s", key) + + if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { + return err + } + // NOTE: there is no other caller to update + + return nil +} diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go new file mode 100644 index 00000000000..3ad2ccfbfd5 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten_name.go @@ -0,0 +1,293 @@ +package analysis + +import ( + "fmt" + "path" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/operations" + "github.com/go-openapi/analysis/internal/flatten/replace" + "github.com/go-openapi/analysis/internal/flatten/schutils" + "github.com/go-openapi/analysis/internal/flatten/sortref" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// InlineSchemaNamer finds a new name for an inlined type +type InlineSchemaNamer struct { + Spec *spec.Swagger + Operations map[string]operations.OpRef + flattenContext *context + opts *FlattenOpts +} + +// Name yields a new name for the inline schema +func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *AnalyzedSchema) error { + debugLog("naming inlined schema at %s", key) + + parts := sortref.KeyParts(key) + for _, name := range namesFromKey(parts, aschema, isn.Operations) { + if name == "" { + continue + } + + // create unique name + newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name)) + + // clone schema + sch := schutils.Clone(schema) + + // replace values on schema + if err := replace.RewriteSchemaToRef(isn.Spec, key, + spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err) + } + + // rewrite any dependent $ref pointing to this place, + // when not already pointing to a top-level definition. + // + // NOTE: this is important if such referers use arbitrary JSON pointers. + an := New(isn.Spec) + for k, v := range an.references.allRefs { + r, erd := replace.DeepestRef(isn.opts.Swagger(), isn.opts.ExpandOpts(false), v) + if erd != nil { + return fmt.Errorf("at %s, %w", k, erd) + } + + if isn.opts.flattenContext != nil { + isn.opts.flattenContext.warnings = append(isn.opts.flattenContext.warnings, r.Warnings...) 
+ } + + if r.Ref.String() != key && (r.Ref.String() != path.Join(definitionsPath, newName) || path.Dir(v.String()) == definitionsPath) { + continue + } + + debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String()) + + // rewrite $ref to the new target + if err := replace.UpdateRef(isn.Spec, k, + spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + } + + // NOTE: this extension is currently not used by go-swagger (provided for information only) + sch.AddExtension("x-go-gen-location", GenLocation(parts)) + + // save cloned schema to definitions + schutils.Save(isn.Spec, newName, sch) + + // keep track of created refs + if isn.flattenContext == nil { + continue + } + + debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen) + resolved := false + + if _, ok := isn.flattenContext.newRefs[key]; ok { + resolved = isn.flattenContext.newRefs[key].resolved + } + + isn.flattenContext.newRefs[key] = &newRef{ + key: key, + newName: newName, + path: path.Join(definitionsPath, newName), + isOAIGen: isOAIGen, + resolved: resolved, + schema: sch, + } + } + + return nil +} + +// uniqifyName yields a unique name for a definition +func uniqifyName(definitions spec.Definitions, name string) (string, bool) { + isOAIGen := false + if name == "" { + name = "oaiGen" + isOAIGen = true + } + + if len(definitions) == 0 { + return name, isOAIGen + } + + unq := true + for k := range definitions { + if strings.EqualFold(k, name) { + unq = false + + break + } + } + + if unq { + return name, isOAIGen + } + + name += "OAIGen" + isOAIGen = true + var idx int + unique := name + _, known := definitions[unique] + + for known { + idx++ + unique = fmt.Sprintf("%s%d", name, idx) + _, known = definitions[unique] + } + + return unique, isOAIGen +} + +func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations map[string]operations.OpRef) []string { + var ( + baseNames [][]string + startIndex int + ) + + if parts.IsOperation() { + baseNames, startIndex = namesForOperation(parts, operations) + } + + // definitions + if parts.IsDefinition() { + baseNames, startIndex = namesForDefinition(parts) + } + + result := make([]string, 0, len(baseNames)) + for _, segments := range baseNames { + nm := parts.BuildName(segments, startIndex, partAdder(aschema)) + if nm == "" { + continue + } + + result = append(result, nm) + } + sort.Strings(result) + + return result +} + +func namesForParam(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) { + var ( + baseNames [][]string + startIndex int + ) + + piref := parts.PathItemRef() + if piref.String() != "" && parts.IsOperationParam() { + if op, ok := operations[piref.String()]; ok { + startIndex = 5 + baseNames = append(baseNames, []string{op.ID, "params", "body"}) + } + } else if parts.IsSharedOperationParam() { + pref := parts.PathRef() + for k, v := range operations { + if strings.HasPrefix(k, pref.String()) { + startIndex = 4 + baseNames = append(baseNames, []string{v.ID, "params", "body"}) + } + } + } + + return baseNames, startIndex +} + +func namesForOperation(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) { + var ( + baseNames [][]string + startIndex int + ) + + // params + if parts.IsOperationParam() || parts.IsSharedOperationParam() { + baseNames, startIndex = namesForParam(parts, operations) + } + + // responses + if parts.IsOperationResponse() { + piref := parts.PathItemRef() + if piref.String() != "" { + if op, ok := 
operations[piref.String()]; ok { + startIndex = 6 + baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) + } + } + } + + return baseNames, startIndex +} + +func namesForDefinition(parts sortref.SplitKey) ([][]string, int) { + nm := parts.DefinitionName() + if nm != "" { + return [][]string{{parts.DefinitionName()}}, 2 + } + + return [][]string{}, 0 +} + +// partAdder knows how to interpret a schema when it comes to building a name from parts +func partAdder(aschema *AnalyzedSchema) sortref.PartAdder { + return func(part string) []string { + segments := make([]string, 0, 2) + + if part == "items" || part == "additionalItems" { + if aschema.IsTuple || aschema.IsTupleWithExtra { + segments = append(segments, "tuple") + } else { + segments = append(segments, "items") + } + + if part == "additionalItems" { + segments = append(segments, part) + } + + return segments + } + + segments = append(segments, part) + + return segments + } +} + +func nameFromRef(ref spec.Ref) string { + u := ref.GetURL() + if u.Fragment != "" { + return swag.ToJSONName(path.Base(u.Fragment)) + } + + if u.Path != "" { + bn := path.Base(u.Path) + if bn != "" && bn != "/" { + ext := path.Ext(bn) + if ext != "" { + return swag.ToJSONName(bn[:len(bn)-len(ext)]) + } + + return swag.ToJSONName(bn) + } + } + + return swag.ToJSONName(strings.ReplaceAll(u.Host, ".", " ")) +} + +// GenLocation indicates from which section of the specification (models or operations) a definition has been created. +// +// This is reflected in the output spec with an "x-go-gen-location" extension. At the moment, this is provided +// for information only. +func GenLocation(parts sortref.SplitKey) string { + switch { + case parts.IsOperation(): + return "operations" + case parts.IsDefinition(): + return "models" + default: + return "" + } +} diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go new file mode 100644 index 00000000000..c5bb97b0a69 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten_options.go @@ -0,0 +1,78 @@ +package analysis + +import ( + "log" + + "github.com/go-openapi/spec" +) + +// FlattenOpts holds the configuration for flattening a swagger specification. +// +// The BasePath parameter is used to locate remote relative $ref found in the specification. +// This path is a file: it points to the location of the root document and may be either a local +// file path or a URL. +// +// If none is specified, relative references (e.g. "$ref": "folder/schema.yaml#/definitions/...") +// found in the spec are searched from the current working directory. +type FlattenOpts struct { + Spec *Spec // The analyzed spec to work with + flattenContext *context // Internal context to track flattening activity + + BasePath string // The location of the root document for this spec to resolve relative $ref + + // Flattening options + Expand bool // When true, skip flattening the spec and expand it instead (if Minimal is false) + Minimal bool // When true, do not decompose complex structures such as allOf + Verbose bool // enable some reporting on possible name conflicts detected + RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening + ContinueOnError bool // Continue when spec expansion issues are found + + /* Extra keys */ + _ struct{} // require keys +} + +// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document.
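+// +// Editorial sketch of putting FlattenOpts together (not upstream code; assumes doc is a *loads.Document from github.com/go-openapi/loads): +// +// an := analysis.New(doc.Spec()) +// err := analysis.Flatten(analysis.FlattenOpts{ +// Spec: an, // the analyzed spec to work with +// BasePath: doc.SpecFilePath(), // where relative $ref are resolved from +// Minimal: true, // stop after minimal $ref processing +// RemoveUnused: true, // drop now-unused shared parameters/responses +// }) +//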
+func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions { + return &spec.ExpandOptions{ + RelativeBase: f.BasePath, + SkipSchemas: skipSchemas, + ContinueOnError: f.ContinueOnError, + } +} + +// Swagger gets the swagger specification for this flatten operation +func (f *FlattenOpts) Swagger() *spec.Swagger { + return f.Spec.spec +} + +// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting +// from flattening a spec +func (f *FlattenOpts) croak() { + if !f.Verbose { + return + } + + reported := make(map[string]bool, len(f.flattenContext.newRefs)) + for _, v := range f.Spec.references.allRefs { + // warns about duplicate handling + for _, r := range f.flattenContext.newRefs { + if r.isOAIGen && r.path == v.String() { + reported[r.newName] = true + } + } + } + + for k := range reported { + log.Printf("warning: duplicate flattened definition name resolved as %s", k) + } + + // warns about possible type mismatches + uniqueMsg := make(map[string]bool) + for _, msg := range f.flattenContext.warnings { + if _, ok := uniqueMsg[msg]; ok { + continue + } + log.Printf("warning: %s", msg) + uniqueMsg[msg] = true + } +} diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go new file mode 100644 index 00000000000..ec0fec02298 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go @@ -0,0 +1,41 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package debug + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + output = os.Stdout +) + +// GetLogger provides a prefix debug logger +func GetLogger(prefix string, debug bool) func(string, ...interface{}) { + if debug { + logger := log.New(output, fmt.Sprintf("%s:", prefix), log.LstdFlags) + + return func(msg string, args ...interface{}) { + _, file1, pos1, _ := runtime.Caller(1) + logger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } + } + + return func(msg string, args ...interface{}) {} +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go new file mode 100644 index 00000000000..8c9df0580da --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go @@ -0,0 +1,87 @@ +package normalize + +import ( + "net/url" + "path" + "path/filepath" + "strings" + + "github.com/go-openapi/spec" +) + +// RebaseRef rebases a remote ref relative to a base ref. +// +// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here). 
+// +// NOTE(windows): +// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// * "/ in paths may appear as escape sequences +func RebaseRef(baseRef string, ref string) string { + baseRef, _ = url.PathUnescape(baseRef) + ref, _ = url.PathUnescape(ref) + + if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") { + return ref + } + + parts := strings.Split(ref, "#") + + baseParts := strings.Split(baseRef, "#") + baseURL, _ := url.Parse(baseParts[0]) + if strings.HasPrefix(ref, "#") { + if baseURL.Host == "" { + return strings.Join([]string{baseParts[0], parts[1]}, "#") + } + + return strings.Join([]string{baseParts[0], parts[1]}, "#") + } + + refURL, _ := url.Parse(parts[0]) + if refURL.Host != "" || filepath.IsAbs(parts[0]) { + // not rebasing an absolute path + return ref + } + + // there is a relative path + var basePath string + if baseURL.Host != "" { + // when there is a host, standard URI rules apply (with "/") + baseURL.Path = path.Dir(baseURL.Path) + baseURL.Path = path.Join(baseURL.Path, "/"+parts[0]) + + return baseURL.String() + } + + // this is a local relative path + // basePart[0] and parts[0] are local filesystem directories/files + basePath = filepath.Dir(baseParts[0]) + relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0]) + if len(parts) > 1 { + return strings.Join([]string{relPath, parts[1]}, "#") + } + + return relPath +} + +// Path renders absolute path on remote file refs +// +// NOTE(windows): +// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// * "/ in paths may appear as escape sequences +func Path(ref spec.Ref, basePath string) string { + uri, _ := url.PathUnescape(ref.String()) + if ref.HasFragmentOnly || filepath.IsAbs(uri) { + return uri + } + + refURL, _ := url.Parse(uri) + if refURL.Host != "" { + return uri + } + + parts := strings.Split(uri, "#") + // BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage + parts[0] = filepath.Join(filepath.Dir(basePath), parts[0]) + + return strings.Join(parts, "#") +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go new file mode 100644 index 00000000000..7f3a2b8717f --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go @@ -0,0 +1,90 @@ +package operations + +import ( + "path" + "sort" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// AllOpRefsByRef returns an index of sortable operations +func AllOpRefsByRef(specDoc Provider, operationIDs []string) map[string]OpRef { + return OpRefsByRef(GatherOperations(specDoc, operationIDs)) +} + +// OpRefsByRef indexes a map of sortable operations +func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef { + result := make(map[string]OpRef, len(oprefs)) + for _, v := range oprefs { + result[v.Ref.String()] = v + } + + return result +} + +// OpRef is an indexable, sortable operation +type OpRef struct { + Method string + Path string + Key string + ID string + Op *spec.Operation + Ref spec.Ref +} + +// OpRefs is a sortable collection of operations +type OpRefs []OpRef + +func (o OpRefs) Len() int { return len(o) } +func (o OpRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o OpRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } + +// Provider knows how to collect 
operations from a spec +type Provider interface { + Operations() map[string]map[string]*spec.Operation +} + +// GatherOperations builds a map of sorted operations from a spec +func GatherOperations(specDoc Provider, operationIDs []string) map[string]OpRef { + var oprefs OpRefs + + for method, pathItem := range specDoc.Operations() { + for pth, operation := range pathItem { + vv := *operation + oprefs = append(oprefs, OpRef{ + Key: swag.ToGoName(strings.ToLower(method) + " " + pth), + Method: method, + Path: pth, + ID: vv.ID, + Op: &vv, + Ref: spec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)), + }) + } + } + + sort.Sort(oprefs) + + operations := make(map[string]OpRef) + for _, opr := range oprefs { + nm := opr.ID + if nm == "" { + nm = opr.Key + } + + oo, found := operations[nm] + if found && oo.Method != opr.Method && oo.Path != opr.Path { + nm = opr.Key + } + + if len(operationIDs) == 0 || swag.ContainsStrings(operationIDs, opr.ID) || swag.ContainsStrings(operationIDs, nm) { + opr.ID = nm + opr.Op.ID = nm + operations[nm] = opr + } + } + + return operations +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go new file mode 100644 index 00000000000..26c2a05a310 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go @@ -0,0 +1,434 @@ +package replace + +import ( + "fmt" + "net/url" + "os" + "path" + "strconv" + + "github.com/go-openapi/analysis/internal/debug" + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" +) + +const definitionsPath = "#/definitions" + +var debugLog = debug.GetLogger("analysis/flatten/replace", os.Getenv("SWAGGER_DEBUG") != "") + +// RewriteSchemaToRef replaces a schema with a Ref +func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error { + debugLog("rewriting schema to ref for %s with %s", key, ref.String()) + _, value, err := getPointerFromKey(sp, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *spec.Schema: + return rewriteParentRef(sp, key, ref) + + case spec.Schema: + return rewriteParentRef(sp, key, ref) + + case *spec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + } + + case *spec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + } + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error { + parent, entry, pvalue, err := getParentFromKey(sp, key) + if err != nil { + return err + } + + debugLog("rewriting holder for %T", pvalue) + switch container := pvalue.(type) { + case spec.Response: + if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { + return err + } + + case *spec.Response: + container.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.Responses: + statusCode, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + resp := container.StatusCodeResponses[statusCode] + resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container.StatusCodeResponses[statusCode] = resp + + case map[string]spec.Response: + resp := container[entry] + resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[entry] = resp + + case 
spec.Parameter: + if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { + return err + } + + case map[string]spec.Parameter: + param := container[entry] + param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[entry] = param + + case []spec.Parameter: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + param := container[idx] + param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[idx] = param + + case spec.Definitions: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case map[string]spec.Schema: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", key[1:], err) + } + container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case spec.SchemaProperties: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) + } + + return nil +} + +// getPointerFromKey retrieves the content of the JSON pointer "key" +func getPointerFromKey(sp interface{}, key string) (string, interface{}, error) { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + if key == "#/" { + return "", sp, nil + } + // unescape chars in key, e.g. "{}" from path params + pth, _ := url.PathUnescape(key[1:]) + ptr, err := jsonpointer.New(pth) + if err != nil { + return "", nil, err + } + + value, _, err := ptr.Get(sp) + if err != nil { + debugLog("error when getting key: %s with path: %s", key, pth) + + return "", nil, err + } + + return pth, value, nil +} + +// getParentFromKey retrieves the container of the JSON pointer "key" +func getParentFromKey(sp interface{}, key string) (string, string, interface{}, error) { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + // unescape chars in key, e.g. "{}" from path params + pth, _ := url.PathUnescape(key[1:]) + + parent, entry := path.Dir(pth), path.Base(pth) + debugLog("getting schema holder at: %s, with entry: %s", parent, entry) + + pptr, err := jsonpointer.New(parent) + if err != nil { + return "", "", nil, err + } + pvalue, _, err := pptr.Get(sp) + if err != nil { + return "", "", nil, fmt.Errorf("can't get parent for %s: %w", parent, err) + } + + return parent, entry, pvalue, nil +} + +// UpdateRef replaces a ref by another one +func UpdateRef(sp interface{}, key string, ref spec.Ref) error { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic("unexpected type used in getPointerFromKey") + } + debugLog("updating ref for %s with %s", key, ref.String()) + pth, value, err := getPointerFromKey(sp, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *spec.Schema: + refable.Ref = ref + case *spec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case *spec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case spec.Schema: + debugLog("rewriting holder for %T", refable) + _, entry, pvalue, erp := getParentFromKey(sp, key) + if erp != nil { + return erp + } + switch container := pvalue.(type) { + case spec.Definitions: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case map[string]spec.Schema: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", pth, err) + } + container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", pth, err) + } + container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case spec.SchemaProperties: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled container type at %s: %T", key, value) + } + + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +// UpdateRefWithSchema replaces a ref with a schema (i.e.
re-inline schema) +func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error { + debugLog("updating ref for %s with schema", key) + pth, value, err := getPointerFromKey(sp, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *spec.Schema: + *refable = *sch + case spec.Schema: + _, entry, pvalue, erp := getParentFromKey(sp, key) + if erp != nil { + return erp + } + switch container := pvalue.(type) { + case spec.Definitions: + container[entry] = *sch + + case map[string]spec.Schema: + container[entry] = *sch + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", pth, err) + } + container[idx] = *sch + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %w", pth, err) + } + container.Schemas[idx] = *sch + + case spec.SchemaProperties: + container[entry] = *sch + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value) + } + case *spec.SchemaOrArray: + *refable.Schema = *sch + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + case *spec.SchemaOrBool: + *refable.Schema = *sch + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +// DeepestRefResult holds the results from DeepestRef analysis +type DeepestRefResult struct { + Ref spec.Ref + Schema *spec.Schema + Warnings []string +} + +// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. +// - if no definition is found, returns the deepest ref. +// - pointers to external files are expanded +// +// NOTE: all external $ref's are assumed to be already expanded at this stage. +func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) { + if !ref.HasFragmentOnly { + // we found an external $ref, which is odd at this stage: + // do nothing on external $refs + return &DeepestRefResult{Ref: ref}, nil + } + + currentRef := ref + visited := make(map[string]bool, 64) + warnings := make([]string, 0, 2) + +DOWNREF: + for currentRef.String() != "" { + if path.Dir(currentRef.String()) == definitionsPath { + // this is a top-level definition: stop here and return this ref + return &DeepestRefResult{Ref: currentRef}, nil + } + + if _, beenThere := visited[currentRef.String()]; beenThere { + return nil, + fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String()) + } + + visited[currentRef.String()] = true + value, _, err := currentRef.GetPointer().Get(sp) + if err != nil { + return nil, err + } + + switch refable := value.(type) { + case *spec.Schema: + if refable.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Ref + + case spec.Schema: + if refable.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Ref + + case *spec.SchemaOrArray: + if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Schema.Ref + + case *spec.SchemaOrBool: + if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Schema.Ref + + case spec.Response: + // a pointer points to a schema initially marshalled in responses section...
+ // Attempt to convert this to a schema. If this fails, the spec is invalid + asJSON, _ := refable.MarshalJSON() + var asSchema spec.Schema + + err := asSchema.UnmarshalJSON(asJSON) + if err != nil { + return nil, + fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T", + currentRef.String(), value) + } + warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String())) + + if asSchema.Ref.String() == "" { + break DOWNREF + } + currentRef = asSchema.Ref + + case spec.Parameter: + // a pointer points to a schema initially marshalled in parameters section... + // Attempt to convert this to a schema. If this fails, the spec is invalid + asJSON, _ := refable.MarshalJSON() + var asSchema spec.Schema + if err := asSchema.UnmarshalJSON(asJSON); err != nil { + return nil, + fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T", + currentRef.String(), value) + } + + warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String())) + + if asSchema.Ref.String() == "" { + break DOWNREF + } + currentRef = asSchema.Ref + + default: + return nil, + fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T", + currentRef.String(), value) + } + } + + // assess what schema we're ending with + sch, erv := spec.ResolveRefWithBase(sp, &currentRef, opts) + if erv != nil { + return nil, erv + } + + if sch == nil { + return nil, fmt.Errorf("no schema found at %s", currentRef.String()) + } + + return &DeepestRefResult{Ref: currentRef, Schema: sch, Warnings: warnings}, nil +}
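Editor's note: a hedged sketch of DeepestRef in use (not part of the vendored file); the response name is an illustrative assumption:

    // Follow a cascade of $ref's starting at a response; the result reports
    // the first #/definitions ref found (or the deepest ref otherwise),
    // which a caller can then feed back into UpdateRef.
    res, err := DeepestRef(sp, &spec.ExpandOptions{}, spec.MustCreateRef("#/responses/notFound"))
    if err == nil {
        debugLog("deepest ref: %s (%d warnings)", res.Ref.String(), len(res.Warnings))
    }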
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go new file mode 100644 index 00000000000..4590236e683 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go @@ -0,0 +1,29 @@ +// Package schutils provides tools to save or clone a schema +// when flattening a spec. +package schutils + +import ( + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// Save registers a schema as an entry in spec #/definitions +func Save(sp *spec.Swagger, name string, schema *spec.Schema) { + if schema == nil { + return + } + + if sp.Definitions == nil { + sp.Definitions = make(map[string]spec.Schema, 150) + } + + sp.Definitions[name] = *schema +} + +// Clone deep-clones a schema +func Clone(schema *spec.Schema) *spec.Schema { + var sch spec.Schema + _ = swag.FromDynamicJSON(schema, &sch) + + return &sch +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go new file mode 100644 index 00000000000..18e552eadce --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go @@ -0,0 +1,201 @@ +package sortref + +import ( + "net/http" + "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" +) + +const ( + paths = "paths" + responses = "responses" + parameters = "parameters" + definitions = "definitions" +) + +var ( + ignoredKeys map[string]struct{} + validMethods map[string]struct{} +) + +func init() { + ignoredKeys = map[string]struct{}{ + "schema": {}, + "properties": {}, + "not": {}, + "anyOf": {}, + "oneOf": {}, + } + + validMethods = map[string]struct{}{ + "GET": {}, + "HEAD": {}, + "OPTIONS": {}, + "PATCH": {}, + "POST": {}, + "PUT": {}, + "DELETE": {}, + } +} + +// Key represents a key item constructed from /-separated segments +type Key struct { + Segments int + Key string +} + +// Keys is a sortable collection of Keys +type Keys []Key + +func (k Keys) Len() int { return len(k) } +func (k Keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k Keys) Less(i, j int) bool { + return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) +} + +// KeyParts constructs a SplitKey with all its /-separated segments decomposed. It is sortable. +func KeyParts(key string) SplitKey { + var res []string + for _, part := range strings.Split(key[1:], "/") { + if part != "" { + res = append(res, jsonpointer.Unescape(part)) + } + } + + return res +} + +// SplitKey holds the parts of a /-separated key, so that their location may be determined. +type SplitKey []string + +// IsDefinition is true when the split key is in the #/definitions section of a spec +func (s SplitKey) IsDefinition() bool { + return len(s) > 1 && s[0] == definitions +} + +// DefinitionName yields the name of the definition +func (s SplitKey) DefinitionName() string { + if !s.IsDefinition() { + return "" + } + + return s[1] +} + +func (s SplitKey) isKeyName(i int) bool { + if i <= 0 { + return false + } + + count := 0 + for idx := i - 1; idx > 0; idx-- { + if s[idx] != "properties" { + break + } + count++ + } + + return count%2 != 0 +} + +// PartAdder knows how to construct the components of a new name +type PartAdder func(string) []string + +// BuildName builds a name from segments +func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) string { + for i, part := range s[startIndex:] { + if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { + segments = append(segments, adder(part)...)
+ } + } + + return strings.Join(segments, " ") +} + +// IsOperation is true when the split key is in the operations section +func (s SplitKey) IsOperation() bool { + return len(s) > 1 && s[0] == paths +} + +// IsSharedOperationParam is true when the split key is in the parameters section of a path +func (s SplitKey) IsSharedOperationParam() bool { + return len(s) > 2 && s[0] == paths && s[2] == parameters +} + +// IsSharedParam is true when the split key is in the #/parameters section of a spec +func (s SplitKey) IsSharedParam() bool { + return len(s) > 1 && s[0] == parameters +} + +// IsOperationParam is true when the split key is in the parameters section of an operation +func (s SplitKey) IsOperationParam() bool { + return len(s) > 3 && s[0] == paths && s[3] == parameters +} + +// IsOperationResponse is true when the split key is in the responses section of an operation +func (s SplitKey) IsOperationResponse() bool { + return len(s) > 3 && s[0] == paths && s[3] == responses +} + +// IsSharedResponse is true when the split key is in the #/responses section of a spec +func (s SplitKey) IsSharedResponse() bool { + return len(s) > 1 && s[0] == responses +} + +// IsDefaultResponse is true when the split key is the default response for an operation +func (s SplitKey) IsDefaultResponse() bool { + return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" +} + +// IsStatusCodeResponse is true when the split key is an operation response with a status code +func (s SplitKey) IsStatusCodeResponse() bool { + isInt := func() bool { + _, err := strconv.Atoi(s[4]) + + return err == nil + } + + return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() +} + +// ResponseName yields either the status code or "Default" for a response +func (s SplitKey) ResponseName() string { + if s.IsStatusCodeResponse() { + code, _ := strconv.Atoi(s[4]) + + return http.StatusText(code) + } + + if s.IsDefaultResponse() { + return "Default" + } + + return "" +} + +// PathItemRef constructs a $ref object from a split key of the form /{path}/{method} +func (s SplitKey) PathItemRef() spec.Ref { + if len(s) < 3 { + return spec.Ref{} + } + + pth, method := s[1], s[2] + if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { + return spec.Ref{} + } + + return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) +} + +// PathRef constructs a $ref object from a split key of the form /paths/{reference} +func (s SplitKey) PathRef() spec.Ref { + if !s.IsOperation() { + return spec.Ref{} + } + + return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(s[1]))) +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go new file mode 100644 index 00000000000..73243df87f0 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go @@ -0,0 +1,141 @@ +package sortref + +import ( + "reflect" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/normalize" + "github.com/go-openapi/spec" +) + +var depthGroupOrder = []string{ + "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", +} + +type mapIterator struct { + len int + mapIter *reflect.MapIter +} + +func (i *mapIterator) Next() bool { + return i.mapIter.Next() +} + +func (i *mapIterator) Len() int { + return i.len +} + +func (i 
*mapIterator) Key() string { + return i.mapIter.Key().String() +} + +func mustMapIterator(anyMap interface{}) *mapIterator { + val := reflect.ValueOf(anyMap) + + return &mapIterator{mapIter: val.MapRange(), len: val.Len()} +} + +// DepthFirst sorts a map of anything. It groups keys by category +// (shared params, op param, status code response, default response, definitions), +// sorts each group internally by number of key parts and by lexical names, and +// flattens the groups into a single list of keys +func DepthFirst(in interface{}) []string { + iterator := mustMapIterator(in) + sorted := make([]string, 0, iterator.Len()) + grouped := make(map[string]Keys, iterator.Len()) + + for iterator.Next() { + k := iterator.Key() + split := KeyParts(k) + var pk string + + if split.IsSharedOperationParam() { + pk = "sharedOpParam" + } + if split.IsOperationParam() { + pk = "opParam" + } + if split.IsStatusCodeResponse() { + pk = "codeResponse" + } + if split.IsDefaultResponse() { + pk = "defaultResponse" + } + if split.IsDefinition() { + pk = "definition" + } + if split.IsSharedParam() { + pk = "sharedParam" + } + if split.IsSharedResponse() { + pk = "sharedResponse" + } + grouped[pk] = append(grouped[pk], Key{Segments: len(split), Key: k}) + } + + for _, pk := range depthGroupOrder { + res := grouped[pk] + sort.Sort(res) + + for _, v := range res { + sorted = append(sorted, v.Key) + } + } + + return sorted +} + +// topmostRefs is able to sort refs by hierarchical then lexicographic order, +// yielding refs ordered breadth-first. +type topmostRefs []string + +func (k topmostRefs) Len() int { return len(k) } +func (k topmostRefs) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k topmostRefs) Less(i, j int) bool { + li, lj := len(strings.Split(k[i], "/")), len(strings.Split(k[j], "/")) + if li == lj { + return k[i] < k[j] + } + + return li < lj +} + +// TopmostFirst sorts references by depth +func TopmostFirst(refs []string) []string { + res := topmostRefs(refs) + sort.Sort(res) + + return res +} + +// RefRevIdx is a reverse index for references +type RefRevIdx struct { + Ref spec.Ref + Keys []string +} + +// ReverseIndex builds a reverse index for references in schemas +func ReverseIndex(schemas map[string]spec.Ref, basePath string) map[string]RefRevIdx { + collected := make(map[string]RefRevIdx) + for key, schRef := range schemas { + // normalize paths before sorting, + // so we get together keys that are from the same external file + normalizedPath := normalize.Path(schRef, basePath) + + entry, ok := collected[normalizedPath] + if ok { + entry.Keys = append(entry.Keys, key) + collected[normalizedPath] = entry + + continue + } + + collected[normalizedPath] = RefRevIdx{ + Ref: schRef, + Keys: []string{key}, + } + } + + return collected +}
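Editor's note: a hedged illustration of the two orderings (not part of the vendored file); the keys are made up:

    // TopmostFirst: fewer /-separated segments first, ties broken lexically.
    sorted := TopmostFirst([]string{
        "/definitions/a/properties/b",
        "/definitions/b",
        "/definitions/a",
    })
    // sorted == []string{"/definitions/a", "/definitions/b", "/definitions/a/properties/b"}
    // DepthFirst works the other way around: it buckets keys by category and,
    // within each bucket, puts keys with more segments first.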
diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go new file mode 100644 index 00000000000..b253052648c --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/mixin.go @@ -0,0 +1,515 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/spec" +) + +// Mixin modifies the primary swagger spec by adding the paths and +// definitions from the mixin specs. Top level parameters and +// responses from the mixins are also carried over. Operation id +// collisions are avoided by appending "Mixin<index>", but only if +// needed. +// +// The following parts of primary are subject to merge, filling empty details: +// - Info +// - BasePath +// - Host +// - ExternalDocs +// +// Consider calling FixEmptyResponseDescriptions() on the modified primary +// if you read the specs from storage and they are valid to start with. +// +// Entries in "paths", "definitions", "parameters" and "responses" are +// added to the primary in the order of the given mixins. If the entry +// already exists in primary it is skipped with a warning message. +// +// The count of skipped entries (from collisions) is returned so any +// deviation from the number expected can flag a warning in your build +// scripts. Carefully review the collisions before accepting them; +// consider renaming things if possible. +// +// No key normalization takes place (paths, type defs, +// etc.). Ensure they are canonical if your downstream tools do +// key normalization of any form. +// +// Merging of schemes (http, https) and of consumers/producers does not +// account for collisions. +func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { + skipped := make([]string, 0, len(mixins)) + opIds := getOpIds(primary) + initPrimary(primary) + + for i, m := range mixins { + skipped = append(skipped, mergeSwaggerProps(primary, m)...) + + skipped = append(skipped, mergeConsumes(primary, m)...) + + skipped = append(skipped, mergeProduces(primary, m)...) + + skipped = append(skipped, mergeTags(primary, m)...) + + skipped = append(skipped, mergeSchemes(primary, m)...) + + skipped = append(skipped, mergeSecurityDefinitions(primary, m)...) + + skipped = append(skipped, mergeSecurityRequirements(primary, m)...) + + skipped = append(skipped, mergeDefinitions(primary, m)...) + + // merging paths requires a map of operationIDs to work with + skipped = append(skipped, mergePaths(primary, m, opIds, i)...) + + skipped = append(skipped, mergeParameters(primary, m)...) + + skipped = append(skipped, mergeResponses(primary, m)...) + } + + return skipped +}
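Editor's note: a hedged usage sketch (not part of the vendored file), assuming primary, m1 and m2 are *spec.Swagger values that have already been loaded:

    // Each returned string describes one entry that was skipped because it
    // already exists in primary or in a higher-priority mixin.
    collisions := analysis.Mixin(primary, m1, m2)
    for _, warn := range collisions {
        log.Print(warn) // assumes "log" is imported
    }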
+// getOpIds extracts all the paths.<path>.operationIds from the given +// spec and returns them as the keys in a map with 'true' values. +func getOpIds(s *spec.Swagger) map[string]bool { + rv := make(map[string]bool) + if s.Paths == nil { + return rv + } + + for _, v := range s.Paths.Paths { + piops := pathItemOps(v) + + for _, op := range piops { + rv[op.ID] = true + } + } + + return rv +} + +func pathItemOps(p spec.PathItem) []*spec.Operation { + var rv []*spec.Operation + rv = appendOp(rv, p.Get) + rv = appendOp(rv, p.Put) + rv = appendOp(rv, p.Post) + rv = appendOp(rv, p.Delete) + rv = appendOp(rv, p.Head) + rv = appendOp(rv, p.Patch) + + return rv +} + +func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation { + if op == nil { + return ops + } + + return append(ops, op) +} + +func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.SecurityDefinitions { + if _, exists := primary.SecurityDefinitions[k]; exists { + warn := fmt.Sprintf( + "SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + + primary.SecurityDefinitions[k] = v + } + + return +} + +func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for _, v := range m.Security { + found := false + for _, vv := range primary.Security { + if reflect.DeepEqual(v, vv) { + found = true + + break + } + } + + if found { + warn := fmt.Sprintf( + "Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v) + skipped = append(skipped, warn) + + continue + } + primary.Security = append(primary.Security, v) + } + + return +} + +func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.Definitions { + // assume name collisions represent IDENTICAL type. careful. + if _, exists := primary.Definitions[k]; exists { + warn := fmt.Sprintf( + "definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + primary.Definitions[k] = v + } + + return +} + +func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) { + if m.Paths != nil { + for k, v := range m.Paths.Paths { + if _, exists := primary.Paths.Paths[k]; exists { + warn := fmt.Sprintf( + "paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + + // Swagger requires that operationIds be + // unique within a spec. If we find a + // collision we append "Mixin0" to the + // operationId we are adding, where 0 is the mixin + // index. We assume that operationIds within + // all the provided specs are already unique. + piops := pathItemOps(v) + for _, piop := range piops { + if opIds[piop.ID] { + piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex) + } + opIds[piop.ID] = true + } + primary.Paths.Paths[k] = v + } + } + + return +} + +func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.Parameters { + // could try to rename on conflict but would + // have to fix $refs in the mixin.
Complain + // for now + if _, exists := primary.Parameters[k]; exists { + warn := fmt.Sprintf( + "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + primary.Parameters[k] = v + } + + return +} + +func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.Responses { + // could try to rename on conflict but would + // have to fix $refs in the mixin. Complain + // for now + if _, exists := primary.Responses[k]; exists { + warn := fmt.Sprintf( + "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + primary.Responses[k] = v + } + + return skipped +} + +func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Consumes { + found := false + for _, vv := range primary.Consumes { + if v == vv { + found = true + + break + } + } + + if found { + // no warning here: we just skip it + continue + } + primary.Consumes = append(primary.Consumes, v) + } + + return []string{} +} + +func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Produces { + found := false + for _, vv := range primary.Produces { + if v == vv { + found = true + + break + } + } + + if found { + // no warning here: we just skip it + continue + } + primary.Produces = append(primary.Produces, v) + } + + return []string{} +} + +func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for _, v := range m.Tags { + found := false + for _, vv := range primary.Tags { + if v.Name == vv.Name { + found = true + + break + } + } + + if found { + warn := fmt.Sprintf( + "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n", + v.Name, + ) + skipped = append(skipped, warn) + + continue + } + + primary.Tags = append(primary.Tags, v) + } + + return +} + +func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Schemes { + found := false + for _, vv := range primary.Schemes { + if v == vv { + found = true + + break + } + } + + if found { + // no warning here: we just skip it + continue + } + primary.Schemes = append(primary.Schemes, v) + } + + return []string{} +} + +func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { + var skipped, skippedInfo, skippedDocs []string + + primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions) + + // merging details in swagger top properties + if primary.Host == "" { + primary.Host = m.Host + } + + if primary.BasePath == "" { + primary.BasePath = m.BasePath + } + + if primary.Info == nil { + primary.Info = m.Info + } else if m.Info != nil { + skippedInfo = mergeInfo(primary.Info, m.Info) + skipped = append(skipped, skippedInfo...) + } + + if primary.ExternalDocs == nil { + primary.ExternalDocs = m.ExternalDocs + } else if m.ExternalDocs != nil { + skippedDocs = mergeExternalDocs(primary.ExternalDocs, m.ExternalDocs) + skipped = append(skipped, skippedDocs...)
+ } + + return skipped +} + +// nolint: unparam +func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string { + if primary.Description == "" { + primary.Description = m.Description + } + + if primary.URL == "" { + primary.URL = m.URL + } + + return nil +} + +func mergeInfo(primary *spec.Info, m *spec.Info) []string { + var sk, skipped []string + + primary.Extensions, sk = mergeExtensions(primary.Extensions, m.Extensions) + skipped = append(skipped, sk...) + + if primary.Description == "" { + primary.Description = m.Description + } + + if primary.Title == "" { + primary.Title = m.Title + } + + if primary.TermsOfService == "" { + primary.TermsOfService = m.TermsOfService + } + + if primary.Version == "" { + primary.Version = m.Version + } + + if primary.Contact == nil { + primary.Contact = m.Contact + } else if m.Contact != nil { + var csk []string + primary.Contact.Extensions, csk = mergeExtensions(primary.Contact.Extensions, m.Contact.Extensions) + skipped = append(skipped, csk...) + + if primary.Contact.Name == "" { + primary.Contact.Name = m.Contact.Name + } + + if primary.Contact.URL == "" { + primary.Contact.URL = m.Contact.URL + } + + if primary.Contact.Email == "" { + primary.Contact.Email = m.Contact.Email + } + } + + if primary.License == nil { + primary.License = m.License + } else if m.License != nil { + var lsk []string + primary.License.Extensions, lsk = mergeExtensions(primary.License.Extensions, m.License.Extensions) + skipped = append(skipped, lsk...) + + if primary.License.Name == "" { + primary.License.Name = m.License.Name + } + + if primary.License.URL == "" { + primary.License.URL = m.License.URL + } + } + + return skipped +} + +func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) { + if primary == nil { + result = m + + return + } + + if m == nil { + result = primary + + return + } + + result = primary + for k, v := range m { + if _, found := primary[k]; found { + skipped = append(skipped, k) + + continue + } + + primary[k] = v + } + + return +} + +func initPrimary(primary *spec.Swagger) { + if primary.SecurityDefinitions == nil { + primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme) + } + + if primary.Security == nil { + primary.Security = make([]map[string][]string, 0, 10) + } + + if primary.Produces == nil { + primary.Produces = make([]string, 0, 10) + } + + if primary.Consumes == nil { + primary.Consumes = make([]string, 0, 10) + } + + if primary.Tags == nil { + primary.Tags = make([]spec.Tag, 0, 10) + } + + if primary.Schemes == nil { + primary.Schemes = make([]string, 0, 10) + } + + if primary.Paths == nil { + primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)} + } + + if primary.Paths.Paths == nil { + primary.Paths.Paths = make(map[string]spec.PathItem) + } + + if primary.Definitions == nil { + primary.Definitions = make(spec.Definitions) + } + + if primary.Parameters == nil { + primary.Parameters = make(map[string]spec.Parameter) + } + + if primary.Responses == nil { + primary.Responses = make(map[string]spec.Response) + } +} diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go new file mode 100644 index 00000000000..fc055095cbb --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/schema.go @@ -0,0 +1,256 @@ +package analysis + +import ( + "fmt" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// SchemaOpts configures the schema analyzer
+type SchemaOpts struct { + Schema *spec.Schema + Root interface{} + BasePath string + _ struct{} +} + +// Schema analyzes a schema and classifies it according to known +// patterns. +func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { + if opts.Schema == nil { + return nil, fmt.Errorf("no schema to analyze") + } + + a := &AnalyzedSchema{ + schema: opts.Schema, + root: opts.Root, + basePath: opts.BasePath, + } + + a.initializeFlags() + a.inferKnownType() + a.inferEnum() + a.inferBaseType() + + if err := a.inferMap(); err != nil { + return nil, err + } + if err := a.inferArray(); err != nil { + return nil, err + } + + a.inferTuple() + + if err := a.inferFromRef(); err != nil { + return nil, err + } + + a.inferSimpleSchema() + + return a, nil +} + +// AnalyzedSchema indicates what the schema represents +type AnalyzedSchema struct { + schema *spec.Schema + root interface{} + basePath string + + hasProps bool + hasAllOf bool + hasItems bool + hasAdditionalProps bool + hasAdditionalItems bool + hasRef bool + + IsKnownType bool + IsSimpleSchema bool + IsArray bool + IsSimpleArray bool + IsMap bool + IsSimpleMap bool + IsExtendedObject bool + IsTuple bool + IsTupleWithExtra bool + IsBaseType bool + IsEnum bool +} + +// inherits copies value fields from other onto this schema +func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { + if other == nil { + return + } + a.hasProps = other.hasProps + a.hasAllOf = other.hasAllOf + a.hasItems = other.hasItems + a.hasAdditionalItems = other.hasAdditionalItems + a.hasAdditionalProps = other.hasAdditionalProps + a.hasRef = other.hasRef + + a.IsKnownType = other.IsKnownType + a.IsSimpleSchema = other.IsSimpleSchema + a.IsArray = other.IsArray + a.IsSimpleArray = other.IsSimpleArray + a.IsMap = other.IsMap + a.IsSimpleMap = other.IsSimpleMap + a.IsExtendedObject = other.IsExtendedObject + a.IsTuple = other.IsTuple + a.IsTupleWithExtra = other.IsTupleWithExtra + a.IsBaseType = other.IsBaseType + a.IsEnum = other.IsEnum +} + +func (a *AnalyzedSchema) inferFromRef() error { + if a.hasRef { + sch := new(spec.Schema) + sch.Ref = a.schema.Ref + err := spec.ExpandSchema(sch, a.root, nil) + if err != nil { + return err + } + rsch, err := Schema(SchemaOpts{ + Schema: sch, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + // NOTE(fredbi): currently the only cause for errors is + // unresolved ref. Since spec.ExpandSchema() expands the + // schema recursively, there is no chance to get there, + // until we add more causes for error in this schema analysis.
+ return err + } + a.inherits(rsch) + } + + return nil +} + +func (a *AnalyzedSchema) inferSimpleSchema() { + a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap +} + +func (a *AnalyzedSchema) inferKnownType() { + tpe := a.schema.Type + format := a.schema.Format + a.IsKnownType = tpe.Contains("boolean") || + tpe.Contains("integer") || + tpe.Contains("number") || + tpe.Contains("string") || + (format != "" && strfmt.Default.ContainsName(format)) || + (a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems) +} + +func (a *AnalyzedSchema) inferMap() error { + if !a.isObjectType() { + return nil + } + + hasExtra := a.hasProps || a.hasAllOf + a.IsMap = a.hasAdditionalProps && !hasExtra + a.IsExtendedObject = a.hasAdditionalProps && hasExtra + + if !a.IsMap { + return nil + } + + // maps + if a.schema.AdditionalProperties.Schema != nil { + msch, err := Schema(SchemaOpts{ + Schema: a.schema.AdditionalProperties.Schema, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + return err + } + a.IsSimpleMap = msch.IsSimpleSchema + } else if a.schema.AdditionalProperties.Allows { + a.IsSimpleMap = true + } + + return nil +} + +func (a *AnalyzedSchema) inferArray() error { + // an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple + // (yes, even if the Items array contains only one element). + // arrays in JSON schema may be unrestricted (i.e. no Items specified). + // Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays. + // + // NOTE: the spec package misses the distinction between: + // items: [] and items: {}, so we consider both arrays here. + a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil) + if a.IsArray && a.hasItems { + if a.schema.Items.Schema != nil { + itsch, err := Schema(SchemaOpts{ + Schema: a.schema.Items.Schema, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + return err + } + + a.IsSimpleArray = itsch.IsSimpleSchema + } + } + + if a.IsArray && !a.hasItems { + a.IsSimpleArray = true + } + + return nil +} + +func (a *AnalyzedSchema) inferTuple() { + tuple := a.hasItems && a.schema.Items.Schemas != nil + a.IsTuple = tuple && !a.hasAdditionalItems + a.IsTupleWithExtra = tuple && a.hasAdditionalItems +} + +func (a *AnalyzedSchema) inferBaseType() { + if a.isObjectType() { + a.IsBaseType = a.schema.Discriminator != "" + } +} + +func (a *AnalyzedSchema) inferEnum() { + a.IsEnum = len(a.schema.Enum) > 0 +} + +func (a *AnalyzedSchema) initializeFlags() { + a.hasProps = len(a.schema.Properties) > 0 + a.hasAllOf = len(a.schema.AllOf) > 0 + a.hasRef = a.schema.Ref.String() != "" + + a.hasItems = a.schema.Items != nil && + (a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0) + + a.hasAdditionalProps = a.schema.AdditionalProperties != nil && + (a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows) + + a.hasAdditionalItems = a.schema.AdditionalItems != nil && + (a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows) +} + +func (a *AnalyzedSchema) isObjectType() bool { + return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object")) +} + +func (a *AnalyzedSchema) isArrayType() bool { + return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array")) +}
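Editor's note: a hedged usage sketch of the analyzer (not part of the vendored file); sch and root stand for a schema and its containing document:

    an, err := analysis.Schema(analysis.SchemaOpts{
        Schema:   sch,  // *spec.Schema to classify
        Root:     root, // root document, used to resolve $ref
        BasePath: "swagger.json",
    })
    if err == nil && an.IsMap && !an.IsSimpleMap {
        // the additionalProperties schema is itself complex
    }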
+ +// isAnalyzedAsComplex determines if an analyzed schema is eligible for flattening (i.e. it is "complex"). +// +// Complex means the schema is not any of: +// - a simple type (primitive) +// - an array of something (items are possibly complex ; if this is the case, items will generate a definition) +// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will +// generate a definition) +func (a *AnalyzedSchema) isAnalyzedAsComplex() bool { + return !a.IsSimpleSchema && !a.IsArray && !a.IsMap +} diff --git a/vendor/github.com/go-openapi/errors/.gitattributes b/vendor/github.com/go-openapi/errors/.gitattributes new file mode 100644 index 00000000000..a0717e4b3b9 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf \ No newline at end of file diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore new file mode 100644 index 00000000000..dd91ed6a04e --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml new file mode 100644 index 00000000000..4e1fc0c7d48 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.golangci.yml @@ -0,0 +1,48 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - godox + - gocognit + - whitespace + - wsl + - funlen + - gochecknoglobals + - gochecknoinits + - scopelint + - wrapcheck + - exhaustivestruct + - exhaustive + - nlreturn + - testpackage + - gci + - gofumpt + - goerr113 + - gomnd + - tparallel + - nestif + - godot + - errorlint + - paralleltest + - tparallel + - cyclop + - errname + - varnamelen + - exhaustruct + - maintidx diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation.
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/LICENSE b/vendor/github.com/go-openapi/errors/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md new file mode 100644 index 00000000000..4aac049e608 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/README.md @@ -0,0 +1,11 @@ +# OpenAPI errors + +[![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) +[![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/errors.svg)](https://pkg.go.dev/github.com/go-openapi/errors) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/errors.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) + +Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go new file mode 100644 index 00000000000..77f1f92c5e3 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/api.go @@ -0,0 +1,182 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" +) + +// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code. 
+var DefaultHTTPCode = http.StatusUnprocessableEntity + +// Error represents an error interface that all swagger framework errors implement +type Error interface { + error + Code() int32 +} + +type apiError struct { + code int32 + message string +} + +func (a *apiError) Error() string { + return a.message +} + +func (a *apiError) Code() int32 { + return a.code +} + +// MarshalJSON implements the JSON encoding interface +func (a apiError) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "code": a.code, + "message": a.message, + }) +} + +// New creates a new API error with a code and a message +func New(code int32, message string, args ...interface{}) Error { + if len(args) > 0 { + return &apiError{code, fmt.Sprintf(message, args...)} + } + return &apiError{code, message} +} + +// NotFound creates a new not found error +func NotFound(message string, args ...interface{}) Error { + if message == "" { + message = "Not found" + } + return New(http.StatusNotFound, fmt.Sprintf(message, args...)) +} + +// NotImplemented creates a new not implemented error +func NotImplemented(message string) Error { + return New(http.StatusNotImplemented, message) +} + +// MethodNotAllowedError represents an error for when the path matches but the method doesn't +type MethodNotAllowedError struct { + code int32 + Allowed []string + message string +} + +func (m *MethodNotAllowedError) Error() string { + return m.message +} + +// Code returns the error code +func (m *MethodNotAllowedError) Code() int32 { + return m.code +} + +// MarshalJSON implements the JSON encoding interface +func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "code": m.code, + "message": m.message, + "allowed": m.Allowed, + }) +} + +func errorAsJSON(err Error) []byte { + //nolint:errchkjson + b, _ := json.Marshal(struct { + Code int32 `json:"code"` + Message string `json:"message"` + }{err.Code(), err.Error()}) + return b +} + +func flattenComposite(errs *CompositeError) *CompositeError { + var res []error + for _, er := range errs.Errors { + switch e := er.(type) { + case *CompositeError: + if len(e.Errors) > 0 { + flat := flattenComposite(e) + if len(flat.Errors) > 0 { + res = append(res, flat.Errors...) + } + } + default: + if e != nil { + res = append(res, e) + } + } + } + return CompositeValidationError(res...) +}
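Editor's note: a hedged illustration of flattenComposite (not part of the vendored file):

    nested := CompositeValidationError(
        CompositeValidationError(New(404, "a"), New(409, "b")),
        New(422, "c"),
    )
    flat := flattenComposite(nested)
    // flat.Errors now holds the three leaf errors; the intermediate
    // CompositeError level has been removed.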
+// MethodNotAllowed creates a new method not allowed error +func MethodNotAllowed(requested string, allow []string) Error { + msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) + return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg} +} + +// ServeError implements the error handler interface +func ServeError(rw http.ResponseWriter, r *http.Request, err error) { + rw.Header().Set("Content-Type", "application/json") + switch e := err.(type) { + case *CompositeError: + er := flattenComposite(e) + // strips composite errors to first element only + if len(er.Errors) > 0 { + ServeError(rw, r, er.Errors[0]) + } else { + // guard against empty CompositeError (invalid construct) + ServeError(rw, r, nil) + } + case *MethodNotAllowedError: + rw.Header().Add("Allow", strings.Join(e.Allowed, ",")) + rw.WriteHeader(asHTTPCode(int(e.Code()))) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(e)) + } + case Error: + value := reflect.ValueOf(e) + if value.Kind() == reflect.Ptr && value.IsNil() { + rw.WriteHeader(http.StatusInternalServerError) + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) + return + } + rw.WriteHeader(asHTTPCode(int(e.Code()))) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(e)) + } + case nil: + rw.WriteHeader(http.StatusInternalServerError) + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) + default: + rw.WriteHeader(http.StatusInternalServerError) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error()))) + } + } +} + +func asHTTPCode(input int) int { + if input >= 600 { + return DefaultHTTPCode + } + return input +} diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go new file mode 100644 index 00000000000..0545b501bd7 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/auth.go @@ -0,0 +1,22 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import "net/http" + +// Unauthenticated returns an unauthenticated error +func Unauthenticated(scheme string) Error { + return New(http.StatusUnauthorized, "unauthenticated for %s", scheme) +}
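Editor's note: a hedged usage sketch of ServeError in an HTTP handler (not part of the vendored file); the route and the authenticated helper are made up:

    http.HandleFunc("/things", func(w http.ResponseWriter, r *http.Request) {
        if !authenticated(r) { // hypothetical check
            errors.ServeError(w, r, errors.Unauthenticated("basic"))
            return
        }
        // ... serve the normal response ...
    })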
diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go new file mode 100644 index 00000000000..af01190ce61 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/doc.go @@ -0,0 +1,26 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package errors provides an Error interface and several concrete types +implementing this interface to manage API errors and JSON-schema validation +errors. + +A middleware handler ServeError() is provided to serve the error types +it defines. + +It is used throughout the various go-openapi toolkit libraries +(https://github.com/go-openapi). +*/ +package errors diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go new file mode 100644 index 00000000000..dfebe8f95f0 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/headers.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// Validation represents a failure of a precondition +type Validation struct { + code int32 + Name string + In string + Value interface{} + message string + Values []interface{} +} + +func (e *Validation) Error() string { + return e.message +} + +// Code the error code +func (e *Validation) Code() int32 { + return e.code +} + +// MarshalJSON implements the JSON encoding interface +func (e Validation) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "code": e.code, + "message": e.message, + "in": e.In, + "name": e.Name, + "value": e.Value, + "values": e.Values, + }) +} + +// ValidateName sets the name for a validation or updates it for a nested property +func (e *Validation) ValidateName(name string) *Validation { + if name != "" { + if e.Name == "" { + e.Name = name + e.message = name + e.message + } else { + e.Name = name + "." + e.Name + e.message = name + "."
+ e.message + } + } + return e +} + +const ( + contentTypeFail = `unsupported media type %q, only %v are allowed` + responseFormatFail = `unsupported media type requested, only %v are available` +) + +// InvalidContentType error for an invalid content type +func InvalidContentType(value string, allowed []string) *Validation { + values := make([]interface{}, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusUnsupportedMediaType, + Name: "Content-Type", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(contentTypeFail, value, allowed), + } +} + +// InvalidResponseFormat error for an unacceptable response format request +func InvalidResponseFormat(value string, allowed []string) *Validation { + values := make([]interface{}, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusNotAcceptable, + Name: "Accept", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(responseFormatFail, allowed), + } +} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go new file mode 100644 index 00000000000..963472d1f34 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/middleware.go @@ -0,0 +1,50 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "bytes" + "fmt" + "strings" +) + +// APIVerificationFailed is an error that contains all the missing info for a mismatched section +// between the api registrations and the api spec +type APIVerificationFailed struct { + Section string `json:"section,omitempty"` + MissingSpecification []string `json:"missingSpecification,omitempty"` + MissingRegistration []string `json:"missingRegistration,omitempty"` +} + +func (v *APIVerificationFailed) Error() string { + buf := bytes.NewBuffer(nil) + + hasRegMissing := len(v.MissingRegistration) > 0 + hasSpecMissing := len(v.MissingSpecification) > 0 + + if hasRegMissing { + buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)) + } + + if hasRegMissing && hasSpecMissing { + buf.WriteString("\n") + } + + if hasSpecMissing { + buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)) + } + + return buf.String() +} diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go new file mode 100644 index 00000000000..5096e1ea7be --- /dev/null +++ b/vendor/github.com/go-openapi/errors/parsing.go @@ -0,0 +1,78 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" +) + +// ParseError represents a parsing error +type ParseError struct { + code int32 + Name string + In string + Value string + Reason error + message string +} + +func (e *ParseError) Error() string { + return e.message +} + +// Code returns the http status code for this error +func (e *ParseError) Code() int32 { + return e.code +} + +// MarshalJSON implements the JSON encoding interface +func (e ParseError) MarshalJSON() ([]byte, error) { + var reason string + if e.Reason != nil { + reason = e.Reason.Error() + } + return json.Marshal(map[string]interface{}{ + "code": e.code, + "message": e.message, + "in": e.In, + "name": e.Name, + "value": e.Value, + "reason": reason, + }) +} + +const ( + parseErrorTemplContent = `parsing %s %s from %q failed, because %s` + parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s` +) + +// NewParseError creates a new parse error +func NewParseError(name, in, value string, reason error) *ParseError { + var msg string + if in == "" { + msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason) + } else { + msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason) + } + return &ParseError{ + code: 400, + Name: name, + In: in, + Value: value, + Reason: reason, + message: msg, + } +} diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go new file mode 100644 index 00000000000..da5f6c78cb5 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/schema.go @@ -0,0 +1,611 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import ( + "encoding/json" + "fmt" + "strings" +) + +const ( + invalidType = "%s is an invalid type name" + typeFail = "%s in %s must be of type %s" + typeFailWithData = "%s in %s must be of type %s: %q" + typeFailWithError = "%s in %s must be of type %s, because: %s" + requiredFail = "%s in %s is required" + readOnlyFail = "%s in %s is readOnly" + tooLongMessage = "%s in %s should be at most %d chars long" + tooShortMessage = "%s in %s should be at least %d chars long" + patternFail = "%s in %s should match '%s'" + enumFail = "%s in %s should be one of %v" + multipleOfFail = "%s in %s should be a multiple of %v" + maxIncFail = "%s in %s should be less than or equal to %v" + maxExcFail = "%s in %s should be less than %v" + minIncFail = "%s in %s should be greater than or equal to %v" + minExcFail = "%s in %s should be greater than %v" + uniqueFail = "%s in %s shouldn't contain duplicates" + maxItemsFail = "%s in %s should have at most %d items" + minItemsFail = "%s in %s should have at least %d items" + typeFailNoIn = "%s must be of type %s" + typeFailWithDataNoIn = "%s must be of type %s: %q" + typeFailWithErrorNoIn = "%s must be of type %s, because: %s" + requiredFailNoIn = "%s is required" + readOnlyFailNoIn = "%s is readOnly" + tooLongMessageNoIn = "%s should be at most %d chars long" + tooShortMessageNoIn = "%s should be at least %d chars long" + patternFailNoIn = "%s should match '%s'" + enumFailNoIn = "%s should be one of %v" + multipleOfFailNoIn = "%s should be a multiple of %v" + maxIncFailNoIn = "%s should be less than or equal to %v" + maxExcFailNoIn = "%s should be less than %v" + minIncFailNoIn = "%s should be greater than or equal to %v" + minExcFailNoIn = "%s should be greater than %v" + uniqueFailNoIn = "%s shouldn't contain duplicates" + maxItemsFailNoIn = "%s should have at most %d items" + minItemsFailNoIn = "%s should have at least %d items" + noAdditionalItems = "%s in %s can't have additional items" + noAdditionalItemsNoIn = "%s can't have additional items" + tooFewProperties = "%s in %s should have at least %d properties" + tooFewPropertiesNoIn = "%s should have at least %d properties" + tooManyProperties = "%s in %s should have at most %d properties" + tooManyPropertiesNoIn = "%s should have at most %d properties" + unallowedProperty = "%s.%s in %s is a forbidden property" + unallowedPropertyNoIn = "%s.%s is a forbidden property" + failedAllPatternProps = "%s.%s in %s failed all pattern properties" + failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" + multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" +) + +// All code responses can be used to differentiate errors for different handling +// by the consuming program +const ( + // CompositeErrorCode remains 422 for backwards-compatibility + // and to separate it from validation errors with cause + CompositeErrorCode = 422 + // InvalidTypeCode is used for any subclass of invalid types + InvalidTypeCode = 600 + iota + RequiredFailCode + TooLongFailCode + TooShortFailCode + PatternFailCode + EnumFailCode + MultipleOfFailCode + MaxFailCode + MinFailCode + UniqueFailCode + MaxItemsFailCode + MinItemsFailCode + NoAdditionalItemsCode + TooFewPropertiesCode + TooManyPropertiesCode + UnallowedPropertyCode + FailedAllPatternPropsCode + MultipleOfMustBePositiveCode + ReadOnlyFailCode +) + +// CompositeError is an error that groups several errors together +type CompositeError struct { + Errors []error + code int32 + message string +} + +// Code for this error 
+func (c *CompositeError) Code() int32 {
+	return c.code
+}
+
+func (c *CompositeError) Error() string {
+	if len(c.Errors) > 0 {
+		msgs := []string{c.message + ":"}
+		for _, e := range c.Errors {
+			msgs = append(msgs, e.Error())
+		}
+		return strings.Join(msgs, "\n")
+	}
+	return c.message
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (c CompositeError) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"code":    c.code,
+		"message": c.message,
+		"errors":  c.Errors,
+	})
+}
+
+// CompositeValidationError an error to wrap a bunch of other errors
+func CompositeValidationError(errors ...error) *CompositeError {
+	return &CompositeError{
+		code:    CompositeErrorCode,
+		Errors:  append([]error{}, errors...),
+		message: "validation failure list",
+	}
+}
+
+// ValidateName recursively sets the name for all validations or updates them for nested properties
+func (c *CompositeError) ValidateName(name string) *CompositeError {
+	for i, e := range c.Errors {
+		if ve, ok := e.(*Validation); ok {
+			c.Errors[i] = ve.ValidateName(name)
+		} else if ce, ok := e.(*CompositeError); ok {
+			c.Errors[i] = ce.ValidateName(name)
+		}
+	}
+
+	return c
+}
+
+// FailedAllPatternProperties an error for when the property doesn't match a pattern
+func FailedAllPatternProperties(name, in, key string) *Validation {
+	msg := fmt.Sprintf(failedAllPatternProps, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key)
+	}
+	return &Validation{
+		code:    FailedAllPatternPropsCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// PropertyNotAllowed an error for a forbidden property
+func PropertyNotAllowed(name, in, key string) *Validation {
+	msg := fmt.Sprintf(unallowedProperty, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(unallowedPropertyNoIn, name, key)
+	}
+	return &Validation{
+		code:    UnallowedPropertyCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// TooFewProperties an error for an object with too few properties
+func TooFewProperties(name, in string, n int64) *Validation {
+	msg := fmt.Sprintf(tooFewProperties, name, in, n)
+	if in == "" {
+		msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n)
+	}
+	return &Validation{
+		code:    TooFewPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   n,
+		message: msg,
+	}
+}
+
+// TooManyProperties an error for an object with too many properties
+func TooManyProperties(name, in string, n int64) *Validation {
+	msg := fmt.Sprintf(tooManyProperties, name, in, n)
+	if in == "" {
+		msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n)
+	}
+	return &Validation{
+		code:    TooManyPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   n,
+		message: msg,
+	}
+}
+
+// AdditionalItemsNotAllowed an error for invalid additional items
+func AdditionalItemsNotAllowed(name, in string) *Validation {
+	msg := fmt.Sprintf(noAdditionalItems, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(noAdditionalItemsNoIn, name)
+	}
+	return &Validation{
+		code:    NoAdditionalItemsCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// InvalidCollectionFormat another flavor of invalid type error
+func InvalidCollectionFormat(name, in, format string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   format,
+		message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name),
+	}
+}
+
+// InvalidTypeName an error for when the type is invalid
+func InvalidTypeName(typeName string) *Validation {
+	return &Validation{
code: InvalidTypeCode, + Value: typeName, + message: fmt.Sprintf(invalidType, typeName), + } +} + +// InvalidType creates an error for when the type is invalid +func InvalidType(name, in, typeName string, value interface{}) *Validation { + var message string + + if in != "" { + switch value.(type) { + case string: + message = fmt.Sprintf(typeFailWithData, name, in, typeName, value) + case error: + message = fmt.Sprintf(typeFailWithError, name, in, typeName, value) + default: + message = fmt.Sprintf(typeFail, name, in, typeName) + } + } else { + switch value.(type) { + case string: + message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value) + case error: + message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value) + default: + message = fmt.Sprintf(typeFailNoIn, name, typeName) + } + } + + return &Validation{ + code: InvalidTypeCode, + Name: name, + In: in, + Value: value, + message: message, + } + +} + +// DuplicateItems error for when an array contains duplicates +func DuplicateItems(name, in string) *Validation { + msg := fmt.Sprintf(uniqueFail, name, in) + if in == "" { + msg = fmt.Sprintf(uniqueFailNoIn, name) + } + return &Validation{ + code: UniqueFailCode, + Name: name, + In: in, + message: msg, + } +} + +// TooManyItems error for when an array contains too many items +func TooManyItems(name, in string, max int64, value interface{}) *Validation { + msg := fmt.Sprintf(maxItemsFail, name, in, max) + if in == "" { + msg = fmt.Sprintf(maxItemsFailNoIn, name, max) + } + + return &Validation{ + code: MaxItemsFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// TooFewItems error for when an array contains too few items +func TooFewItems(name, in string, min int64, value interface{}) *Validation { + msg := fmt.Sprintf(minItemsFail, name, in, min) + if in == "" { + msg = fmt.Sprintf(minItemsFailNoIn, name, min) + } + return &Validation{ + code: MinItemsFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// ExceedsMaximumInt error for when maximum validation fails +func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := maxIncFailNoIn + if exclusive { + m = maxExcFailNoIn + } + message = fmt.Sprintf(m, name, max) + } else { + m := maxIncFail + if exclusive { + m = maxExcFail + } + message = fmt.Sprintf(m, name, in, max) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMaximumUint error for when maximum validation fails +func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := maxIncFailNoIn + if exclusive { + m = maxExcFailNoIn + } + message = fmt.Sprintf(m, name, max) + } else { + m := maxIncFail + if exclusive { + m = maxExcFail + } + message = fmt.Sprintf(m, name, in, max) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMaximum error for when maximum validation fails +func ExceedsMaximum(name, in string, max float64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := maxIncFailNoIn + if exclusive { + m = maxExcFailNoIn + } + message = fmt.Sprintf(m, name, max) + } else { + m := maxIncFail + if exclusive { + m = maxExcFail + } + message = fmt.Sprintf(m, name, in, max) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + 
message: message, + } +} + +// ExceedsMinimumInt error for when minimum validation fails +func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, min) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, min) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimumUint error for when minimum validation fails +func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, min) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, min) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimum error for when minimum validation fails +func ExceedsMinimum(name, in string, min float64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, min) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, min) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// NotMultipleOf error for when multiple of validation fails +func NotMultipleOf(name, in string, multiple, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) + } else { + msg = fmt.Sprintf(multipleOfFail, name, in, multiple) + } + return &Validation{ + code: MultipleOfFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// EnumFail error for when an enum validation fails +func EnumFail(name, in string, value interface{}, values []interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(enumFailNoIn, name, values) + } else { + msg = fmt.Sprintf(enumFail, name, in, values) + } + + return &Validation{ + code: EnumFailCode, + Name: name, + In: in, + Value: value, + Values: values, + message: msg, + } +} + +// Required error for when a value is missing +func Required(name, in string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(requiredFailNoIn, name) + } else { + msg = fmt.Sprintf(requiredFail, name, in) + } + return &Validation{ + code: RequiredFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// ReadOnly error for when a value is present in request +func ReadOnly(name, in string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(readOnlyFailNoIn, name) + } else { + msg = fmt.Sprintf(readOnlyFail, name, in) + } + return &Validation{ + code: ReadOnlyFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// TooLong error for when a string is too long +func TooLong(name, in string, max int64, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooLongMessageNoIn, name, max) + } else { + msg = fmt.Sprintf(tooLongMessage, name, in, max) + } + return &Validation{ + code: TooLongFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + 
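+// validateExample is an editor's illustrative sketch, not upstream code: it
+// shows how the constructors in this file are typically aggregated with
+// CompositeValidationError so that a single response can report every
+// failure at once. The field names and values below are hypothetical.
+func validateExample() error {
+	return CompositeValidationError(
+		Required("name", "body", nil),
+		TooShort("name", "body", 3, "ab"),
+		ExceedsMinimumInt("age", "body", 0, false, int64(-1)),
+	)
+}
+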
+// TooShort error for when a string is too short +func TooShort(name, in string, min int64, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooShortMessageNoIn, name, min) + } else { + msg = fmt.Sprintf(tooShortMessage, name, in, min) + } + + return &Validation{ + code: TooShortFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// FailedPattern error for when a string fails a regex pattern match +// the pattern that is returned is the ECMA syntax version of the pattern not the golang version. +func FailedPattern(name, in, pattern string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(patternFailNoIn, name, pattern) + } else { + msg = fmt.Sprintf(patternFail, name, in, pattern) + } + + return &Validation{ + code: PatternFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// MultipleOfMustBePositive error for when a +// multipleOf factor is negative +func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { + return &Validation{ + code: MultipleOfMustBePositiveCode, + Name: name, + In: in, + Value: factor, + message: fmt.Sprintf(multipleOfMustBePositive, name, factor), + } +} diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go new file mode 100644 index 00000000000..8956c30884d --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -0,0 +1,63 @@ +package internal + +import ( + "net/url" + "regexp" + "strings" +) + +const ( + defaultHttpPort = ":80" + defaultHttpsPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) + +// NormalizeURL will normalize the specified URL +// This was added to replace a previous call to the no longer maintained purell library: +// The call that was used looked like the following: +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// To explain all that was included in the call above, purell.FlagsSafe was really just the following: +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +func NormalizeURL(u *url.URL) { + lowercaseScheme(u) + lowercaseHost(u) + removeDefaultPort(u) + removeDuplicateSlashes(u) +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + return "" + } + return val + }) + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go index 3bc0a6e26f8..cfdef03e5d9 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -30,8 +30,8 @@ import ( "net/url" "strings" - "github.com/PuerkitoBio/purell" 
"github.com/go-openapi/jsonpointer" + "github.com/go-openapi/jsonreference/internal" ) const ( @@ -114,7 +114,9 @@ func (r *Ref) parse(jsonReferenceString string) error { return err } - r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) + internal.NormalizeURL(parsed) + + r.referenceURL = parsed refURL := r.referenceURL if refURL.Scheme != "" && refURL.Host != "" { diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore new file mode 100644 index 00000000000..e4f15f17bfc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.gitignore @@ -0,0 +1,4 @@ +secrets.yml +coverage.out +profile.cov +profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml new file mode 100644 index 00000000000..d48b4a5156e --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -0,0 +1,44 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - gochecknoinits + - godox + - gocognit + - whitespace + - wsl + - funlen + - gochecknoglobals + - gochecknoinits + - scopelint + - wrapcheck + - exhaustivestruct + - exhaustive + - nlreturn + - testpackage + - gci + - gofumpt + - goerr113 + - gomnd + - tparallel + - nestif + - godot + - errorlint + - paralleltest diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml new file mode 100644 index 00000000000..cd4a7c331bc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.travis.yml @@ -0,0 +1,25 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.16.x +- 1.x +install: +- go get gotest.tools/gotestsum +language: go +arch: +- amd64 +- ppc64le +jobs: + include: + # include linting job, but only for latest go version and amd64 arch + - go: 1.x + arch: amd64 + install: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + script: + - golangci-lint run --new-from-rev master +notifications: + slack: + secure: 
OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md new file mode 100644 index 00000000000..df1f6264623 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/README.md @@ -0,0 +1,6 @@ +# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![Actions/Go Test Status](https://github.com/go-openapi/loads/workflows/Go%20Test/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"Go+Test") + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) + +Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go new file mode 100644 index 00000000000..3046da4cef3 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/doc.go @@ -0,0 +1,21 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package loads provides document loading methods for swagger (OAI) specifications. + +It is used by other go-openapi packages to load and run analysis on local or remote spec documents. + +*/ +package loads diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go new file mode 100644 index 00000000000..44bd32b5b88 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/loaders.go @@ -0,0 +1,134 @@ +package loads + +import ( + "encoding/json" + "errors" + "net/url" + + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +var ( + // Default chain of loaders, defined at the package level. + // + // By default this matches json and yaml documents. + // + // May be altered with AddLoader(). 
+	loaders *loader
+)
+
+func init() {
+	jsonLoader := &loader{
+		DocLoaderWithMatch: DocLoaderWithMatch{
+			Match: func(pth string) bool {
+				return true
+			},
+			Fn: JSONDoc,
+		},
+	}
+
+	loaders = jsonLoader.WithHead(&loader{
+		DocLoaderWithMatch: DocLoaderWithMatch{
+			Match: swag.YAMLMatcher,
+			Fn:    swag.YAMLDoc,
+		},
+	})
+
+	// sets the global default loader for go-openapi/spec
+	spec.PathLoader = loaders.Load
+}
+
+// DocLoader represents a doc loader type
+type DocLoader func(string) (json.RawMessage, error)
+
+// DocMatcher represents a predicate to check if a loader matches
+type DocMatcher func(string) bool
+
+// DocLoaderWithMatch describes a loading function for a given extension match.
+type DocLoaderWithMatch struct {
+	Fn    DocLoader
+	Match DocMatcher
+}
+
+// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options
+func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch {
+	return DocLoaderWithMatch{
+		Fn:    fn,
+		Match: matcher,
+	}
+}
+
+type loader struct {
+	DocLoaderWithMatch
+	Next *loader
+}
+
+// WithHead adds a loader at the head of the current stack
+func (l *loader) WithHead(head *loader) *loader {
+	if head == nil {
+		return l
+	}
+	head.Next = l
+	return head
+}
+
+// WithNext adds a loader at the tail of the current stack
+func (l *loader) WithNext(next *loader) *loader {
+	l.Next = next
+	return next
+}
+
+// Load the raw document from path
+func (l *loader) Load(path string) (json.RawMessage, error) {
+	_, erp := url.Parse(path)
+	if erp != nil {
+		return nil, erp
+	}
+
+	var lastErr error = errors.New("no loader matched") // default error if no match was found
+	for ldr := l; ldr != nil; ldr = ldr.Next {
+		if ldr.Match != nil && !ldr.Match(path) {
+			continue
+		}
+
+		// try then move to next one if there is an error
+		b, err := ldr.Fn(path)
+		if err == nil {
+			return b, nil
+		}
+
+		lastErr = err
+	}
+
+	return nil, lastErr
+}
+
+// JSONDoc loads a json document from either a file or a remote url
+func JSONDoc(path string) (json.RawMessage, error) {
+	data, err := swag.LoadFromFileOrHTTP(path)
+	if err != nil {
+		return nil, err
+	}
+	return json.RawMessage(data), nil
+}
+
+// AddLoader for a document, executed before other previously set loaders.
+//
+// This sets the configuration at the package level.
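+//
+// For example, a hypothetical TOML loader could be registered ahead of the
+// default JSON and YAML loaders (editor's sketch only; tomlDoc is assumed to
+// have the DocLoader signature func(string) (json.RawMessage, error)):
+//
+//	loads.AddLoader(
+//		func(pth string) bool { return strings.HasSuffix(pth, ".toml") },
+//		tomlDoc,
+//	)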
+//
+// NOTE:
+// * this updates the default loader used by github.com/go-openapi/spec
+// * since this sets package level globals, you shouldn't call this concurrently
+//
+func AddLoader(predicate DocMatcher, load DocLoader) {
+	loaders = loaders.WithHead(&loader{
+		DocLoaderWithMatch: DocLoaderWithMatch{
+			Match: predicate,
+			Fn:    load,
+		},
+	})
+
+	// sets the global default loader for go-openapi/spec
+	spec.PathLoader = loaders.Load
+}
diff --git a/vendor/github.com/go-openapi/loads/options.go b/vendor/github.com/go-openapi/loads/options.go
new file mode 100644
index 00000000000..f8305d5607c
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/options.go
@@ -0,0 +1,61 @@
+package loads
+
+type options struct {
+	loader *loader
+}
+
+func defaultOptions() *options {
+	return &options{
+		loader: loaders,
+	}
+}
+
+func loaderFromOptions(options []LoaderOption) *loader {
+	opts := defaultOptions()
+	for _, apply := range options {
+		apply(opts)
+	}
+
+	return opts.loader
+}
+
+// LoaderOption allows fine-tuning the spec loader behavior
+type LoaderOption func(*options)
+
+// WithDocLoader sets a custom loader for loading specs
+func WithDocLoader(l DocLoader) LoaderOption {
+	return func(opt *options) {
+		if l == nil {
+			return
+		}
+		opt.loader = &loader{
+			DocLoaderWithMatch: DocLoaderWithMatch{
+				Fn: l,
+			},
+		}
+	}
+}
+
+// WithDocLoaderMatches sets a chain of custom loaders for loading specs
+// for different extension matches.
+//
+// Loaders are executed in the order in which the DocLoaderWithMatch values
+// are provided.
+func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption {
+	return func(opt *options) {
+		var final, prev *loader
+		for _, ldr := range l {
+			if ldr.Fn == nil {
+				continue
+			}
+
+			if prev == nil {
+				final = &loader{DocLoaderWithMatch: ldr}
+				prev = final
+				continue
+			}
+
+			prev = prev.WithNext(&loader{DocLoaderWithMatch: ldr})
+		}
+		opt.loader = final
+	}
+}
diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go
new file mode 100644
index 00000000000..93c8d4b8955
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/spec.go
@@ -0,0 +1,266 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package loads
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"fmt"
+
+	"github.com/go-openapi/analysis"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/swag"
+)
+
+func init() {
+	gob.Register(map[string]interface{}{})
+	gob.Register([]interface{}{})
+}
+
+// Document represents a swagger spec document
+type Document struct {
+	// specAnalyzer
+	Analyzer     *analysis.Spec
+	spec         *spec.Swagger
+	specFilePath string
+	origSpec     *spec.Swagger
+	schema       *spec.Schema
+	raw          json.RawMessage
+	pathLoader   *loader
+}
+
+// JSONSpec loads a spec from a json document
+func JSONSpec(path string, options ...LoaderOption) (*Document, error) {
+	data, err := JSONDoc(path)
+	if err != nil {
+		return nil, err
+	}
+	// convert to json
+	return Analyzed(data, "", options...)
+} + +// Embedded returns a Document based on embedded specs. No analysis is required +func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, error) { + var origSpec, flatSpec spec.Swagger + if err := json.Unmarshal(orig, &origSpec); err != nil { + return nil, err + } + if err := json.Unmarshal(flat, &flatSpec); err != nil { + return nil, err + } + return &Document{ + raw: orig, + origSpec: &origSpec, + spec: &flatSpec, + pathLoader: loaderFromOptions(options), + }, nil +} + +// Spec loads a new spec document from a local or remote path +func Spec(path string, options ...LoaderOption) (*Document, error) { + + ldr := loaderFromOptions(options) + + b, err := ldr.Load(path) + if err != nil { + return nil, err + } + + document, err := Analyzed(b, "", options...) + if err != nil { + return nil, err + } + + if document != nil { + document.specFilePath = path + document.pathLoader = ldr + } + + return document, err +} + +// Analyzed creates a new analyzed spec document for a root json.RawMessage. +func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) { + if version == "" { + version = "2.0" + } + if version != "2.0" { + return nil, fmt.Errorf("spec version %q is not supported", version) + } + + raw, err := trimData(data) // trim blanks, then convert yaml docs into json + if err != nil { + return nil, err + } + + swspec := new(spec.Swagger) + if err = json.Unmarshal(raw, swspec); err != nil { + return nil, err + } + + origsqspec, err := cloneSpec(swspec) + if err != nil { + return nil, err + } + + d := &Document{ + Analyzer: analysis.New(swspec), + schema: spec.MustLoadSwagger20Schema(), + spec: swspec, + raw: raw, + origSpec: origsqspec, + pathLoader: loaderFromOptions(options), + } + + return d, nil +} + +func trimData(in json.RawMessage) (json.RawMessage, error) { + trimmed := bytes.TrimSpace(in) + if len(trimmed) == 0 { + return in, nil + } + + if trimmed[0] == '{' || trimmed[0] == '[' { + return trimmed, nil + } + + // assume yaml doc: convert it to json + yml, err := swag.BytesToYAMLDoc(trimmed) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + + d, err := swag.YAMLToJSON(yml) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + + return d, nil +} + +// Expanded expands the ref fields in the spec document and returns a new spec document +func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { + + swspec := new(spec.Swagger) + if err := json.Unmarshal(d.raw, swspec); err != nil { + return nil, err + } + + var expandOptions *spec.ExpandOptions + if len(options) > 0 { + expandOptions = options[0] + } else { + expandOptions = &spec.ExpandOptions{ + RelativeBase: d.specFilePath, + } + } + + if expandOptions.PathLoader == nil { + if d.pathLoader != nil { + // use loader from Document options + expandOptions.PathLoader = d.pathLoader.Load + } else { + // use package level loader + expandOptions.PathLoader = loaders.Load + } + } + + if err := spec.ExpandSpec(swspec, expandOptions); err != nil { + return nil, err + } + + dd := &Document{ + Analyzer: analysis.New(swspec), + spec: swspec, + specFilePath: d.specFilePath, + schema: spec.MustLoadSwagger20Schema(), + raw: d.raw, + origSpec: d.origSpec, + } + return dd, nil +} + +// BasePath the base path for this spec +func (d *Document) BasePath() string { + return d.spec.BasePath +} + +// Version returns the version of this spec +func (d *Document) Version() string { + return d.spec.Swagger +} + +// Schema returns the swagger 
2.0 schema +func (d *Document) Schema() *spec.Schema { + return d.schema +} + +// Spec returns the swagger spec object model +func (d *Document) Spec() *spec.Swagger { + return d.spec +} + +// Host returns the host for the API +func (d *Document) Host() string { + return d.spec.Host +} + +// Raw returns the raw swagger spec as json bytes +func (d *Document) Raw() json.RawMessage { + return d.raw +} + +// OrigSpec yields the original spec +func (d *Document) OrigSpec() *spec.Swagger { + return d.origSpec +} + +// ResetDefinitions gives a shallow copy with the models reset to the original spec +func (d *Document) ResetDefinitions() *Document { + defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) + for k, v := range d.origSpec.Definitions { + defs[k] = v + } + + d.spec.Definitions = defs + return d +} + +// Pristine creates a new pristine document instance based on the input data +func (d *Document) Pristine() *Document { + dd, _ := Analyzed(d.Raw(), d.Version()) + dd.pathLoader = d.pathLoader + return dd +} + +// SpecFilePath returns the file path of the spec if one is defined +func (d *Document) SpecFilePath() string { + return d.specFilePath +} + +func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return nil, err + } + + var dst spec.Swagger + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return nil, err + } + return &dst, nil +} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.gitattributes b/vendor/github.com/go-openapi/runtime/.gitattributes new file mode 100644 index 00000000000..d207b1802b2 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore new file mode 100644 index 00000000000..fea8b84eca9 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +*.cov +*.out +playground diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml new file mode 100644 index 00000000000..b1aa7928a7c --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.golangci.yml @@ -0,0 +1,44 @@ +linters-settings: + govet: + # Using err repeatedly considered as shadowing. 
+ check-shadowing: false + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 +linters: + disable: + - maligned + - lll + - gochecknoglobals + - godox + - gocognit + - whitespace + - wsl + - funlen + - gochecknoglobals + - gochecknoinits + - scopelint + - wrapcheck + - exhaustivestruct + - exhaustive + - nlreturn + - testpackage + - gci + - gofumpt + - goerr113 + - gomnd + - tparallel + - nestif + - godot + - errorlint + - noctx + - interfacer + - nilerr diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. 
The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/github.com/go-openapi/runtime/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md new file mode 100644 index 00000000000..5b1ec649454 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/README.md @@ -0,0 +1,7 @@ +# runtime [![Build Status](https://travis-ci.org/go-openapi/runtime.svg?branch=client-context)](https://travis-ci.org/go-openapi/runtime) [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/runtime?status.svg)](http://godoc.org/github.com/go-openapi/runtime) + +# golang Open-API toolkit - runtime + +The runtime component for use in codegeneration or as untyped usage. diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go new file mode 100644 index 00000000000..6eb6ceb5c5d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/bytestream.go @@ -0,0 +1,169 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +func defaultCloser() error { return nil } + +type byteStreamOpt func(opts *byteStreamOpts) + +// ClosesStream when the bytestream consumer or producer is finished +func ClosesStream(opts *byteStreamOpts) { + opts.Close = true +} + +type byteStreamOpts struct { + Close bool +} + +// ByteStreamConsumer creates a consumer for byte streams, +// takes a Writer/BinaryUnmarshaler interface or binary slice by reference, +// and reads from the provided reader +func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("ByteStreamConsumer requires a reader") // early exit + } + + close := defaultCloser + if vals.Close { + if cl, ok := reader.(io.Closer); ok { + close = cl.Close + } + } + //nolint:errcheck // closing a reader wouldn't fail. + defer close() + + if wrtr, ok := data.(io.Writer); ok { + _, err := io.Copy(wrtr, reader) + return err + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + if bu, ok := data.(encoding.BinaryUnmarshaler); ok { + return bu.UnmarshalBinary(b) + } + + if data != nil { + if str, ok := data.(*string); ok { + *str = string(b) + return nil + } + } + + if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + v.SetBytes(b) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", + data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") + }) +} + +// ByteStreamProducer creates a producer for byte streams, +// takes a Reader/BinaryMarshaler interface or binary slice, +// and writes to a writer (essentially a pipe) +func ByteStreamProducer(opts ...byteStreamOpt) Producer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("ByteStreamProducer requires a writer") // early exit + } + close := defaultCloser + if vals.Close { + if cl, ok := writer.(io.Closer); ok { + close = cl.Close + } + } + //nolint:errcheck // TODO: closing a writer would fail. 
+ defer close() + + if rc, ok := data.(io.ReadCloser); ok { + defer rc.Close() + } + + if rdr, ok := data.(io.Reader); ok { + _, err := io.Copy(writer, rdr) + return err + } + + if bm, ok := data.(encoding.BinaryMarshaler); ok { + bytes, err := bm.MarshalBinary() + if err != nil { + return err + } + + _, err = writer.Write(bytes) + return err + } + + if data != nil { + if str, ok := data.(string); ok { + _, err := writer.Write([]byte(str)) + return err + } + + if e, ok := data.(error); ok { + _, err := writer.Write([]byte(e.Error())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + _, err := writer.Write(v.Bytes()) + return err + } + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", + data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") + }) +} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go new file mode 100644 index 00000000000..c6c97d9a7c3 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_auth_info.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import "github.com/go-openapi/strfmt" + +// A ClientAuthInfoWriterFunc converts a function to a request writer interface +type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error + +// AuthenticateRequest adds authentication data to the request +func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// A ClientAuthInfoWriter implementor knows how to write authentication info to a request +type ClientAuthInfoWriter interface { + AuthenticateRequest(ClientRequest, strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go new file mode 100644 index 00000000000..fa21eacf330 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_operation.go @@ -0,0 +1,41 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
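A short usage sketch for the byte-stream pair above: the consumer's first branch copies into any io.Writer, and the producer's string branch writes plain text; the payload values are placeholders.

package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	// Consume: a *bytes.Buffer satisfies io.Writer, the first branch checked.
	var in bytes.Buffer
	if err := runtime.ByteStreamConsumer().Consume(strings.NewReader("payload"), &in); err != nil {
		log.Fatal(err)
	}

	// Produce: a plain string takes the string branch of the producer.
	var out bytes.Buffer
	if err := runtime.ByteStreamProducer().Produce(&out, "payload"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(in.String(), out.String())
}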
+ +package runtime + +import ( + "context" + "net/http" +) + +// ClientOperation represents the context for a swagger operation to be submitted to the transport +type ClientOperation struct { + ID string + Method string + PathPattern string + ProducesMediaTypes []string + ConsumesMediaTypes []string + Schemes []string + AuthInfo ClientAuthInfoWriter + Params ClientRequestWriter + Reader ClientResponseReader + Context context.Context + Client *http.Client +} + +// A ClientTransport implementor knows how to submit Request objects to some destination +type ClientTransport interface { + //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) + Submit(*ClientOperation) (interface{}, error) +} diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go new file mode 100644 index 00000000000..3efda348216 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_request.go @@ -0,0 +1,153 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/go-openapi/strfmt" +) + +// ClientRequestWriterFunc converts a function to a request writer interface +type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error + +// WriteToRequest adds data to the request +func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// ClientRequestWriter is an interface for things that know how to write to a request +type ClientRequestWriter interface { + WriteToRequest(ClientRequest, strfmt.Registry) error +} + +// ClientRequest is an interface for things that know how to +// add information to a swagger client request +type ClientRequest interface { + SetHeaderParam(string, ...string) error + + GetHeaderParams() http.Header + + SetQueryParam(string, ...string) error + + SetFormParam(string, ...string) error + + SetPathParam(string, string) error + + GetQueryParams() url.Values + + SetFileParam(string, ...NamedReadCloser) error + + SetBodyParam(interface{}) error + + SetTimeout(time.Duration) error + + GetMethod() string + + GetPath() string + + GetBody() []byte + + GetBodyParam() interface{} + + GetFileParam() map[string][]NamedReadCloser +} + +// NamedReadCloser represents a named ReadCloser interface +type NamedReadCloser interface { + io.ReadCloser + Name() string +} + +// NamedReader creates a NamedReadCloser for use as file upload +func NamedReader(name string, rdr io.Reader) NamedReadCloser { + rc, ok := rdr.(io.ReadCloser) + if !ok { + rc = ioutil.NopCloser(rdr) + } + return &namedReadCloser{ + name: name, + cr: rc, + } +} + +type namedReadCloser struct { + name string + cr io.ReadCloser +} + +func (n *namedReadCloser) Close() error { + return n.cr.Close() +} +func (n *namedReadCloser) Read(p []byte) (int, error) { + return n.cr.Read(p) +} +func (n *namedReadCloser) Name() string { + return 
n.name +} + +type TestClientRequest struct { + Headers http.Header + Body interface{} +} + +func (t *TestClientRequest) SetHeaderParam(name string, values ...string) error { + if t.Headers == nil { + t.Headers = make(http.Header) + } + t.Headers.Set(name, values[0]) + return nil +} + +func (t *TestClientRequest) SetQueryParam(_ string, _ ...string) error { return nil } + +func (t *TestClientRequest) SetFormParam(_ string, _ ...string) error { return nil } + +func (t *TestClientRequest) SetPathParam(_ string, _ string) error { return nil } + +func (t *TestClientRequest) SetFileParam(_ string, _ ...NamedReadCloser) error { return nil } + +func (t *TestClientRequest) SetBodyParam(body interface{}) error { + t.Body = body + return nil +} + +func (t *TestClientRequest) SetTimeout(time.Duration) error { + return nil +} + +func (t *TestClientRequest) GetQueryParams() url.Values { return nil } + +func (t *TestClientRequest) GetMethod() string { return "" } + +func (t *TestClientRequest) GetPath() string { return "" } + +func (t *TestClientRequest) GetBody() []byte { return nil } + +func (t *TestClientRequest) GetBodyParam() interface{} { + return t.Body +} + +func (t *TestClientRequest) GetFileParam() map[string][]NamedReadCloser { + return nil +} + +func (t *TestClientRequest) GetHeaderParams() http.Header { + return t.Headers +} diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go new file mode 100644 index 00000000000..0d1691149d4 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_response.go @@ -0,0 +1,110 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/json" + "fmt" + "io" +) + +// A ClientResponse represents a client response +// This bridges between responses obtained from different transports +type ClientResponse interface { + Code() int + Message() string + GetHeader(string) string + GetHeaders(string) []string + Body() io.ReadCloser +} + +// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation +type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error) + +// ReadResponse reads the response +func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) { + return read(resp, consumer) +} + +// A ClientResponseReader is an interface for things that want to read a response.
+// An application of this is to create structs from response values +type ClientResponseReader interface { + ReadResponse(ClientResponse, Consumer) (interface{}, error) +} + +// NewAPIError creates a new API error +func NewAPIError(opName string, payload interface{}, code int) *APIError { + return &APIError{ + OperationName: opName, + Response: payload, + Code: code, + } +} + +// APIError wraps an error model and captures the status code +type APIError struct { + OperationName string + Response interface{} + Code int +} + +func (o *APIError) Error() string { + var resp []byte + if err, ok := o.Response.(error); ok { + resp = []byte("'" + err.Error() + "'") + } else { + resp, _ = json.Marshal(o.Response) + } + return fmt.Sprintf("%s (status %d): %s", o.OperationName, o.Code, resp) +} + +func (o *APIError) String() string { + return o.Error() +} + +// IsSuccess returns true when this response returns a 2xx status code +func (o *APIError) IsSuccess() bool { + return o.Code/100 == 2 +} + +// IsRedirect returns true when this response returns a 3xx status code +func (o *APIError) IsRedirect() bool { + return o.Code/100 == 3 +} + +// IsClientError returns true when this response returns a 4xx status code +func (o *APIError) IsClientError() bool { + return o.Code/100 == 4 +} + +// IsServerError returns true when this response returns a 5xx status code +func (o *APIError) IsServerError() bool { + return o.Code/100 == 5 +} + +// IsCode returns true when this response returns the given status code +func (o *APIError) IsCode(code int) bool { + return o.Code == code +} + +// A ClientResponseStatus is a common interface implemented by all responses in the generated code +// You can use this to treat any client response based on status code +type ClientResponseStatus interface { + IsSuccess() bool + IsRedirect() bool + IsClientError() bool + IsServerError() bool + IsCode(int) bool +} diff --git a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go new file mode 100644 index 00000000000..515969242ca --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/constants.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +const ( + // HeaderContentType represents a http content-type header, its value is supposed to be a mime type + HeaderContentType = "Content-Type" + + // HeaderTransferEncoding represents a http transfer-encoding header.
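TestClientRequest above records headers and body only, which makes it a convenient stub for exercising a ClientRequestWriterFunc in isolation; a hedged sketch (the bearer token is a placeholder, and strfmt.Default comes from the companion go-openapi/strfmt package):

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/runtime"
	"github.com/go-openapi/strfmt"
)

func main() {
	// A parameter writer that only sets the Authorization header.
	writeAuth := runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, _ strfmt.Registry) error {
		return req.SetHeaderParam(runtime.HeaderAuthorization, "Bearer token123")
	})

	req := &runtime.TestClientRequest{}
	if err := writeAuth.WriteToRequest(req, strfmt.Default); err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Headers.Get(runtime.HeaderAuthorization))
}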
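A small sketch of the error helpers above; the operation name and payload are placeholders:

package main

import (
	"fmt"

	"github.com/go-openapi/runtime"
)

func main() {
	apiErr := runtime.NewAPIError("getUser", map[string]string{"reason": "missing"}, 404)

	fmt.Println(apiErr.Error())         // getUser (status 404): {"reason":"missing"}
	fmt.Println(apiErr.IsClientError()) // true
	fmt.Println(apiErr.IsCode(404))     // true
}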
+ HeaderTransferEncoding = "Transfer-Encoding" + + // HeaderAccept the Accept header + HeaderAccept = "Accept" + // HeaderAuthorization the Authorization header + HeaderAuthorization = "Authorization" + + charsetKey = "charset" + + // DefaultMime the default fallback mime type + DefaultMime = "application/octet-stream" + // JSONMime the json mime type + JSONMime = "application/json" + // YAMLMime the yaml mime type + YAMLMime = "application/x-yaml" + // XMLMime the xml mime type + XMLMime = "application/xml" + // TextMime the text mime type + TextMime = "text/plain" + // HTMLMime the html mime type + HTMLMime = "text/html" + // CSVMime the csv mime type + CSVMime = "text/csv" + // MultipartFormMime the multipart form mime type + MultipartFormMime = "multipart/form-data" + // URLencodedFormMime the url encoded form mime type + URLencodedFormMime = "application/x-www-form-urlencoded" +) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go new file mode 100644 index 00000000000..d807bd915b4 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/csv.go @@ -0,0 +1,77 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "encoding/csv" + "errors" + "io" +) + +// CSVConsumer creates a new CSV consumer +func CSVConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("CSVConsumer requires a reader") + } + + csvReader := csv.NewReader(reader) + writer, ok := data.(io.Writer) + if !ok { + return errors.New("data type must be io.Writer") + } + csvWriter := csv.NewWriter(writer) + records, err := csvReader.ReadAll() + if err != nil { + return err + } + for _, r := range records { + if err := csvWriter.Write(r); err != nil { + return err + } + } + csvWriter.Flush() + return nil + }) +} + +// CSVProducer creates a new CSV producer +func CSVProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("CSVProducer requires a writer") + } + + dataBytes, ok := data.([]byte) + if !ok { + return errors.New("data type must be byte array") + } + + csvReader := csv.NewReader(bytes.NewBuffer(dataBytes)) + records, err := csvReader.ReadAll() + if err != nil { + return err + } + csvWriter := csv.NewWriter(writer) + for _, r := range records { + if err := csvWriter.Write(r); err != nil { + return err + } + } + csvWriter.Flush() + return nil + }) +} diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go new file mode 100644 index 00000000000..0d390cfd64c --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/discard.go @@ -0,0 +1,9 @@ +package runtime + +import "io" + +// DiscardConsumer does absolutely nothing, it's a black hole. 
+var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) + +// DiscardProducer does absolutely nothing, it's a black hole. +var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go new file mode 100644 index 00000000000..397d8a45933 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/file.go @@ -0,0 +1,19 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import "github.com/go-openapi/swag" + +type File = swag.File diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go new file mode 100644 index 00000000000..4d111db4fec --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/headers.go @@ -0,0 +1,45 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "mime" + "net/http" + + "github.com/go-openapi/errors" +) + +// ContentType parses a content type header +func ContentType(headers http.Header) (string, string, error) { + ct := headers.Get(HeaderContentType) + orig := ct + if ct == "" { + ct = DefaultMime + } + if ct == "" { + return "", "", nil + } + + mt, opts, err := mime.ParseMediaType(ct) + if err != nil { + return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) + } + + if cs, ok := opts[charsetKey]; ok { + return mt, cs, nil + } + + return mt, "", nil +} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go new file mode 100644 index 00000000000..e3341286834 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/interfaces.go @@ -0,0 +1,112 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
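ContentType above wraps mime.ParseMediaType and falls back to DefaultMime when the header is unset; a short sketch of the happy path:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/go-openapi/runtime"
)

func main() {
	h := make(http.Header)
	h.Set(runtime.HeaderContentType, "application/json; charset=utf-8")

	mediaType, charset, err := runtime.ContentType(h)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(mediaType, charset) // application/json utf-8
}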
+ +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/go-openapi/strfmt" +) + +// OperationHandlerFunc an adapter for a function to the OperationHandler interface +type OperationHandlerFunc func(interface{}) (interface{}, error) + +// Handle implements the operation handler interface +func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { + return s(data) +} + +// OperationHandler a handler for a swagger operation +type OperationHandler interface { + Handle(interface{}) (interface{}, error) +} + +// ConsumerFunc represents a function that can be used as a consumer +type ConsumerFunc func(io.Reader, interface{}) error + +// Consume consumes the reader into the data parameter +func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { + return fn(reader, data) +} + +// Consumer implementations know how to bind the values on the provided interface to +// data provided by the request body +type Consumer interface { + // Consume performs the binding of request values + Consume(io.Reader, interface{}) error +} + +// ProducerFunc represents a function that can be used as a producer +type ProducerFunc func(io.Writer, interface{}) error + +// Produce produces the response for the provided data +func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { + return f(writer, data) +} + +// Producer implementations know how to turn the provided interface into a valid +// HTTP response +type Producer interface { + // Produce writes to the http response + Produce(io.Writer, interface{}) error +} + +// AuthenticatorFunc turns a function into an authenticator +type AuthenticatorFunc func(interface{}) (bool, interface{}, error) + +// Authenticate authenticates the request with the provided data +func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { + return f(params) +} + +// Authenticator represents an authentication strategy +// implementations of Authenticator know how to authenticate the +// request data and translate that into a valid principal object or an error +type Authenticator interface { + Authenticate(interface{}) (bool, interface{}, error) +} + +// AuthorizerFunc turns a function into an authorizer +type AuthorizerFunc func(*http.Request, interface{}) error + +// Authorize authorizes the processing of the request for the principal +func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { + return f(r, principal) +} + +// Authorizer represents an authorization strategy +// implementations of Authorizer know how to authorize the principal object +// using the request data and returns error if unauthorized +type Authorizer interface { + Authorize(*http.Request, interface{}) error +} + +// Validatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. +// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the validations obtained from the spec +type Validatable interface { + Validate(strfmt.Registry) error +} + +// ContextValidatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. 
+// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the context validations obtained from the spec +type ContextValidatable interface { + ContextValidate(context.Context, strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go new file mode 100644 index 00000000000..5a690559cc5 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/json.go @@ -0,0 +1,38 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONConsumer creates a new JSON consumer +func JSONConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := json.NewDecoder(reader) + dec.UseNumber() // preserve number formats + return dec.Decode(data) + }) +} + +// JSONProducer creates a new JSON producer +func JSONProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := json.NewEncoder(writer) + enc.SetEscapeHTML(false) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go new file mode 100644 index 00000000000..078fda17396 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -0,0 +1,139 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
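The JSON pair above adjusts the encoding/json defaults (UseNumber on decode, SetEscapeHTML(false) on encode); a minimal round-trip sketch:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/go-openapi/runtime"
)

func main() {
	var buf bytes.Buffer
	if err := runtime.JSONProducer().Produce(&buf, map[string]string{"name": "a&b"}); err != nil {
		log.Fatal(err)
	}

	var decoded map[string]interface{}
	if err := runtime.JSONConsumer().Consume(&buf, &decoded); err != nil {
		log.Fatal(err)
	}
	// "&" survives because the producer disables HTML escaping.
	fmt.Println(decoded["name"])
}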
+ +package runtime + +import ( + "bufio" + "io" + "net/http" + "strings" + + "github.com/go-openapi/swag" +) + +// CanHaveBody returns true if this method can have a body +func CanHaveBody(method string) bool { + mn := strings.ToUpper(method) + return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" +} + +// IsSafe returns true if this is a request with a safe method +func IsSafe(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn == "GET" || mn == "HEAD" +} + +// AllowsBody returns true if the request allows for a body +func AllowsBody(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn != "HEAD" +} + +// HasBody returns true if this method needs a content-type +func HasBody(r *http.Request) bool { + // happy case: we have a content length set + if r.ContentLength > 0 { + return true + } + + if r.Header.Get("content-length") != "" { + // in this case, no Transfer-Encoding should be present + // we have a header set but it was explicitly set to 0, so we assume no body + return false + } + + rdr := newPeekingReader(r.Body) + r.Body = rdr + return rdr.HasContent() +} + +func newPeekingReader(r io.ReadCloser) *peekingReader { + if r == nil { + return nil + } + return &peekingReader{ + underlying: bufio.NewReader(r), + orig: r, + } +} + +type peekingReader struct { + underlying interface { + Buffered() int + Peek(int) ([]byte, error) + Read([]byte) (int, error) + } + orig io.ReadCloser +} + +func (p *peekingReader) HasContent() bool { + if p == nil { + return false + } + if p.underlying.Buffered() > 0 { + return true + } + b, err := p.underlying.Peek(1) + if err != nil { + return false + } + return len(b) > 0 +} + +func (p *peekingReader) Read(d []byte) (int, error) { + if p == nil { + return 0, io.EOF + } + return p.underlying.Read(d) +} + +func (p *peekingReader) Close() error { + p.underlying = nil + if p.orig != nil { + return p.orig.Close() + } + return nil +} + +// JSONRequest creates a new http request with json headers set +func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + req.Header.Add(HeaderContentType, JSONMime) + req.Header.Add(HeaderAccept, JSONMime) + return req, nil +} + +// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) +type Gettable interface { + GetOK(string) ([]string, bool, bool) +} + +// ReadSingleValue reads a single value from the source +func ReadSingleValue(values Gettable, name string) string { + vv, _, hv := values.GetOK(name) + if hv { + return vv[len(vv)-1] + } + return "" +} + +// ReadCollectionValue reads a collection value from a string data source +func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { + v := ReadSingleValue(values, name) + return swag.SplitByFormat(v, collectionFormat) +} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go new file mode 100644 index 00000000000..3b011a0bff1 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/statuses.go @@ -0,0 +1,90 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
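A sketch of the request helpers above: JSONRequest pre-populates the JSON headers, and ReadCollectionValue splits a parameter according to its swagger collectionFormat. Values, which satisfies Gettable, is defined in values.go later in this diff; the URL is a placeholder.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	req, err := runtime.JSONRequest("POST", "https://example.com/items", strings.NewReader(`{"id":1}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Header.Get(runtime.HeaderAccept)) // application/json

	// "csv" splits the single raw value on commas.
	values := runtime.Values{"tags": []string{"a,b,c"}}
	fmt.Println(runtime.ReadCollectionValue(values, "tags", "csv")) // [a b c]
}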
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +// Statuses lists the most common HTTP status codes to default message +// taken from https://httpstatuses.com/ +var Statuses = map[int]string{ + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 103: "Checkpoint", + 122: "URI too long", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Request Processed", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Request Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental Controls", + 451: "Wrong Exchange Server", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 509: "Bandwidth Limit Exceeded", + 510: "Not Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go new file mode 100644 index 00000000000..f33320b7dd5 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/text.go @@ -0,0 +1,116 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
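Statuses above supplies a default reason phrase per status code, useful when a spec omits a response description; a one-line sketch:

package main

import (
	"fmt"

	"github.com/go-openapi/runtime"
)

func main() {
	fmt.Println(runtime.Statuses[422]) // Unprocessable Entity
}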
+ +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +// TextConsumer creates a new text consumer +func TextConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("TextConsumer requires a reader") // early exit + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + // If the buffer is empty, no need to unmarshal it, which causes a panic. + if len(b) == 0 { + return nil + } + + if tu, ok := data.(encoding.TextUnmarshaler); ok { + err := tu.UnmarshalText(b) + if err != nil { + return fmt.Errorf("text consumer: %v", err) + } + + return nil + } + + t := reflect.TypeOf(data) + if data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t.Elem().Kind() == reflect.String { + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", + data, data, "can be resolved by supporting TextUnmarshaler interface") + }) +} + +// TextProducer creates a new text producer +func TextProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("TextProducer requires a writer") // early exit + } + + if data == nil { + return errors.New("no data given to produce text from") + } + + if tm, ok := data.(encoding.TextMarshaler); ok { + txt, err := tm.MarshalText() + if err != nil { + return fmt.Errorf("text producer: %v", err) + } + _, err = writer.Write(txt) + return err + } + + if str, ok := data.(error); ok { + _, err := writer.Write([]byte(str.Error())) + return err + } + + if str, ok := data.(fmt.Stringer); ok { + _, err := writer.Write([]byte(str.String())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + if v.Kind() != reflect.String { + return fmt.Errorf("%T is not a supported type by the TextProducer", data) + } + + _, err := writer.Write([]byte(v.String())) + return err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go new file mode 100644 index 00000000000..11f5732af4e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/values.go @@ -0,0 +1,19 @@ +package runtime + +// Values typically represent parameters on a http request. +type Values map[string][]string + +// GetOK returns the values collection for the given key. +// When the key is present in the map it will return true for hasKey. +// When the value is not empty it will return true for hasValue. +func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { + value, hasKey = v[key] + if !hasKey { + return + } + if len(value) == 0 { + return + } + hasValue = true + return +} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go new file mode 100644 index 00000000000..821c7393dfb --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/xml.go @@ -0,0 +1,36 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
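The text pair above handles TextMarshaler/TextUnmarshaler implementations, errors, Stringers, and plain strings through reflection; a round-trip sketch:

package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	var buf bytes.Buffer
	if err := runtime.TextProducer().Produce(&buf, "hello"); err != nil {
		log.Fatal(err)
	}

	// A *string target takes the reflective pointer-to-string branch.
	var echoed string
	if err := runtime.TextConsumer().Consume(strings.NewReader(buf.String()), &echoed); err != nil {
		log.Fatal(err)
	}
	fmt.Println(echoed) // hello
}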
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/xml" + "io" +) + +// XMLConsumer creates a new XML consumer +func XMLConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := xml.NewDecoder(reader) + return dec.Decode(data) + }) +} + +// XMLProducer creates a new XML producer +func XMLProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := xml.NewEncoder(writer) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 00000000000..dd91ed6a04e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 00000000000..835d55e7425 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,42 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 00000000000..18782c6dafe --- /dev/null +++ b/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,34 @@ +# OAI object model + +[![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) + +[![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents. + +### FAQ + +* What does this do? + +> 1. This package knows how to marshal and unmarshal Swagger API specifications into a golang object model +> 2. It knows how to resolve $ref and expand them to make a single root document + +* How does it play with the rest of the go-openapi packages ? + +> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger) +> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations +> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it +> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. +> +> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/vendor/github.com/go-openapi/spec/appveyor.yml new file mode 100644 index 00000000000..0903593916e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/appveyor.yml @@ -0,0 +1,32 @@ +version: "0.1.{build}" + +clone_folder: C:\go-openapi\spec +shallow_clone: true # for startup speed +pull_requests: + do_not_increment_build_number: true + +#skip_tags: true +#skip_branch_with_pr: true + +# appveyor.yml +build: off + +environment: + GOPATH: c:\gopath + +stack: go 1.15 + +test_script: + - go test -v -timeout 20m ./... 
+ +deploy: off + +notifications: + - provider: Slack + incoming_webhook: https://hooks.slack.com/services/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 + auth_token: + secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= + channel: bots + on_build_success: false + on_build_failure: true + on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go new file mode 100644 index 00000000000..afc83850c2e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -0,0 +1,297 @@ +// Code generated by go-bindata. DO NOT EDIT. +// sources: +// schemas/jsonschema-draft-04.json (4.357kB) +// schemas/v2/schema.json (40.248kB) + +package spec + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _jsonschemaDraft04Json = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") + +func jsonschemaDraft04JsonBytes() ([]byte, error) { + return bindataRead( + _jsonschemaDraft04Json, + "jsonschema-draft-04.json", + ) +} + +func jsonschemaDraft04Json() (*asset, error) { + bytes, err := jsonschemaDraft04JsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0640), modTime: time.Unix(1568963823, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 
0xf1, 0x1b, 0x82, 0xe2}} + return a, nil +} + +var _v2SchemaJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b
\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\xe3\x08\xb5\x8b\x99\xbd\x82\xbc\x9e\xc2\xe8\x53\x46\x83\x3f\x33\x54\x2b\x5b\xad\x92\x79\xd9\x8f\x5d\x93\x98\xf2\xe6\xc6\x1c\xe6\x9a\x9e\xfc\x43\x82\x31\x66\x8e\x53\x77\xfe\x90\xe7\xf3\xf6\xe9\x62\x23\x3f\x10\x93\x18\xae\x72\x1a\x9d\xf9\x48\xcb\xcc\x5a\x65\xc7\x4a\x04\xf0\xf3\xd5\xd5\x05\x8a\x41\x08\xbc\x86\x86\x43\x51\x6c\xe0\x46\x57\xf6\x44\x40\x0d\xfb\xff\xa2\xc3\x7c\x3d\x39\x84\xdc\x09\x22\x64\x4f\x12\xd9\xba\xaa\xf6\xe3\xbd\x56\xdd\x91\x25\x6a\x14\x9c\x89\x34\x8e\x31\xdf\xee\x15\x7e\x2f\x39\x81\x15\x2a\x28\x95\x66\x51\xf5\xfd\x83\xc5\xfe\x15\x07\xcf\xf7\x08\xee\x1d\x8e\xb6\xc5\x52\xcc\x8c\x5a\x93\x66\xc5\xd8\x79\x38\x46\xd6\xa7\x88\x37\xc9\x2e\xe3\xd2\xa5\x7b\x4b\x3a\xdc\xa1\xdc\x9e\x29\xf1\x8c\x8a\x99\x16\x47\x8d\xd4\x78\x8b\xf6\x1c\xe9\x71\x54\x1b\x69\xa8\x4a\x93\x37\xe5\xb2\x2c\x4f\x0c\x92\xab\xa0\x73\x32\x72\x59\xd3\xf0\x2d\x8d\xed\xca\x37\x16\x19\x9e\xdb\x1c\xab\x17\x49\xc3\x0f\x37\xdc\x88\xb1\xb4\xd4\x42\xcb\x58\x5e\x6a\x52\x0b\x15\x10\x0a\xb0\x04\xe7\xf8\x58\x32\x16\x01\xa6\xcd\x01\xb2\xc2\x69\x24\x35\x38\x6f\x30\x6a\xae\x1b\xb4\x71\xaa\xad\x1d\xa0\xd6\x20\x2d\x8b\x3c\xc6\x82\x62\x27\x34\x6d\x15\x84\x7b\x43\xb1\x35\x78\xa6\x24\x77\x28\xc1\x6e\xfc\xe9\x48\x74\xf4\x15\xe3\xe1\x84\x42\x88\x40\x7a\x26\x49\x3b\x48\xb1\xa4\x19\x8e\x0c\xa7\xb5\x01\x6c\x0c\x97\x61\x8a\xc2\x32\xd8\x8c\x44\x69\x24\xbf\x65\x1d\x74\xd6\xe5\x44\xef\xec\x48\x5e\xb7\x8a\xa3\x29\x8e\x41\x64\xce\x1f\x88\xdc\x00\x47\x4b\x40\x98\x6e\xd1\x0d\x8e\x48\x98\x63\x5c\x21\xb1\x4c\x05\x0a\x58\x98\xc5\x6d\x4f\x0a\x77\x53\x4f\x8b\xc4\x44\x1f\xb2\xdf\x8d\x3b\xea\x9f\xfe\xf6\xf2\xc5\xff\x5d\x7f\xfe\x9f\xfb\x67\x8f\xff\xf3\xe9\x69\xd1\xfe\xb3\xc7\xfd\x3c\xf8\x3f\x71\x94\x82\x23\xd1\x72\x00\xb7\x42\x99\x6c\xc0\x60\x7b\x0f\x79\xea\xa8\x53\x4b\x56\x31\xfa\x0b\x52\x9f\x96\xdb\xcd\x2f\xd7\x67\xcd\x04\x19\x85\xfe\xdb\x02\x9a\x59\x03\xad\x63\x3c\xea\xff\x2e\x18\xfd\x00\xd9\xe2\x56\x60\x59\x93\xb9\xb6\xb2\x3e\x3c\x2c\xab\x0f\xa7\xb2\x89\x43\xc7\xf6\xd5\xce\x2e\xad\xa6\xa9\xed\xa6\xc6\x5a\xb4\xa6\x67\xdf\x8c\x26\x7b\x50\x5a\x91\x08\x2e\x6d\xd4\x3a\xc1\x9d\xf2\xdb\xde\x1e\xb2\x2c\x6c\xa5\x64\xc9\x16\xb4\x90\xaa\x4a\xb7\x0c\xde\x13\xc3\x2a\x9a\x11\x9b\x7a\x1b\x3
d\x95\x97\x37\x31\x6b\x69\x7e\x34\xc0\x67\x1f\x66\x19\x49\xef\xf1\x25\xf5\xac\x0e\xea\x0a\x28\x8d\x4d\x7e\xd9\x57\x4b\x49\xe5\xc6\xb3\x25\xfd\xe6\x57\x42\x25\xac\xcd\xcf\x36\x74\x8e\xca\x24\x47\xe7\x80\xa8\x92\x72\xbd\x3d\x84\x2d\x65\xe2\x82\x1a\x9c\xc4\x44\x92\x1b\x10\x79\x8a\xc4\x4a\x2f\x60\x51\x04\x81\xaa\xf0\xa3\x95\x27\xd7\x12\x7b\xa3\x96\x03\x45\x96\xc1\x8a\x07\xc9\xb2\xb0\x95\x52\x8c\xef\x48\x9c\xc6\x7e\x94\xca\xc2\x0e\x07\x12\x44\xa9\x20\x37\xf0\xae\x0f\x49\xa3\x96\x9d\x4b\x42\x7b\x70\x59\x14\xee\xe0\xb2\x0f\x49\xa3\x96\x4b\x97\xbf\x00\x5d\x4b\x4f\xfc\xbb\x2b\xee\x92\xb9\x17\xb5\xaa\xb8\x0b\x97\x17\x9b\x43\xfd\xd6\xc2\xb2\xc2\x2e\x29\xcf\xfd\x87\x4a\x55\xda\x25\x63\x1f\x5a\x65\x69\x2b\x2d\x3d\x67\xe9\x41\xae\x5e\xc1\x6e\x2b\xd4\xdb\x3e\xa8\xd3\x26\xd2\x48\x92\x24\xca\x61\x86\x8f\x8c\xbb\xf2\x8e\x91\xdf\x1f\x06\x19\x33\xf3\x03\x4d\xba\xcd\xe2\x2d\xfb\x69\xe9\x16\x15\x13\xd5\x56\x85\x4e\x3c\x5b\x8a\xbf\x25\x72\x83\xee\x5e\x20\x22\xf2\xc8\xaa\x7b\xdb\x8e\xe4\x29\x58\xca\x38\xb7\x3f\x2e\x59\xb8\xbd\xa8\x16\x16\xf7\xdb\x79\x51\x9f\x5a\xb4\x8d\x87\x3a\x6e\xbc\x3e\xc5\xb4\xcd\x58\xf9\xf5\x3c\xb9\x6f\x49\xaf\x57\xc1\xfa\x1c\x5d\x6d\x88\x8a\x8b\xd3\x28\xcc\xb7\xef\x10\x8a\x4a\x74\xa9\x4a\xa7\x62\xbf\x0d\x76\x23\x6f\x59\xd9\x31\xee\x40\x11\xfb\x28\xec\x8d\x22\x1c\x13\x5a\x64\x94\x23\x16\x60\xbb\xd2\x7c\xa0\x98\xb2\xe5\x6e\xbc\x54\x33\xe0\x3e\xb9\x52\x17\xdb\xb7\x1b\xc8\x12\x20\x8c\x23\xca\x64\x7e\x78\xa3\x62\x5b\x75\x56\xd9\x9e\x2a\x91\x27\xb0\x70\x34\x1f\x90\x89\xb5\x86\x73\x7e\x71\xda\x1e\xfb\x3a\x72\xdc\x5e\x79\x88\xcb\x74\x79\xd9\x64\xe4\xd4\xc2\x9e\xce\xb1\xfe\x85\x5a\xc0\xe9\x0c\x34\x3d\xd0\x43\xce\xa1\x36\x39\xd5\xa1\x4e\xf5\xf8\xb1\xa9\x23\x08\x75\x84\xac\x53\x6c\x3a\xc5\xa6\x53\x6c\x3a\xc5\xa6\x7f\xc5\xd8\xf4\x51\xfd\xff\x25\x4e\xfa\x33\x05\xbe\x9d\x60\xd2\x04\x93\x6a\x5f\x33\x9b\x98\x50\xd2\xe1\x50\x52\xc6\xcc\xdb\x38\x91\xdb\xe6\xaa\xa2\x8f\xa1\x6a\xa6\xd4\xc6\x56\xd6\x8c\x40\x02\x68\x48\xe8\x1a\xe1\x9a\xd9\x2e\xb7\x05\xc3\x34\xda\x2a\xbb\xcd\x12\x36\x98\x22\x50\x4c\xa1\x1b\xc5\xd5\x84\xf0\xbe\x24\x84\xf7\x2f\x22\x37\xef\x94\xd7\x9f\xa0\xde\x04\xf5\x26\xa8\x37\x41\x3d\x64\x40\x3d\xe5\xf2\xde\x60\x89\x27\xb4\x37\xa1\xbd\xda\xd7\xd2\x2c\x26\xc0\x37\x01\x3e\x1b\xef\x5f\x06\xe0\x6b\x7c\x5c\x91\x08\x26\x10\x38\x81\xc0\x09\x04\x76\x4a\x3d\x81\xc0\xbf\x12\x08\x4c\xb0\xdc\x7c\x99\x00\xd0\x75\x70\xb4\xf8\x5a\x7c\xea\xde\x3e\x39\x08\x30\x5a\x27\x35\xed\xb4\x65\xad\x69\x74\x10\x88\x79\xe2\x30\x52\x19\xd6\x04\x21\xa7\x95\xd5\x0e\x03\xf8\xda\x20\xd7\x84\xb4\x26\xa4\x35\x21\xad\x09\x69\x21\x03\x69\x51\x46\xff\xff\x18\x9b\x54\xed\x87\x47\x06\x9d\x4e\x73\x6e\x9a\xb3\xa9\xce\x83\x5e\x4b\xc6\x71\x20\x45\xd7\x72\xf5\x40\x72\x0e\x34\x6c\xf4\x6c\xf3\xba\x5e\x4b\x97\x0e\x52\xb8\xbe\x8b\x79\xa0\x10\x86\xa1\x75\xb0\x6f\xec\xc8\xf4\x3d\x4d\x7b\x86\xc2\x02\x31\x12\x51\xbf\x07\x94\xad\x10\xd6\x2e\x79\xcf\xe9\x1c\xf5\x1e\x31\x23\x5c\x18\xfb\x9c\xfb\x70\xe0\x62\xbd\xf7\xb5\x94\xcf\xf3\xf6\xfa\xc5\x4e\x9c\x85\x76\x1d\xae\x37\xbc\xde\xa3\x41\xcb\x29\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x67\x0f\x68\xb1\xeb\x38\x47\x07\x10\x1b\xd2\xe2\x18\x68\x6d\x40\xbb\xa3\x40\xba\x21\xf2\x8e\x81\xfb\xf6\x92\x77\x2f\x70\xe8\xdb\xb2\x36\xbf\x30\x91\xc5\x21\xe7\x45\xcc\x34\x0c\x48\x8e\xd0\xf2\x9b\x7c\x3c\xbd\x1c\x04\x3e\x07\xe8\x7c\x2f\x84\x7a\x48\x4d\x1f\xba\xe1\x76\x45\x7b\x60\xe0\x01\xca\xee\x04\xca\x31\xbe\x73\x5f\xa3\x70\x0c\xad\x1f\xa5\xf5\x76\xd5\xbb\xd2\x7e\xfb\x30\x90\xcf\xfa\x67\x7a\xe6\xc3\x37\x42\x19\xe2\xc9\x9c\x61\x4c\xe7\xd1\x77\x55\x86\x6e\x8f\x7b\x85\x42\x33\xa3\xaa\x57\xae\xfd\xd5\xcc\x9c\x56\x68\xe2\xde\x0e\x
a8\x2c\xa9\xb0\x7d\xf0\x54\x2d\x80\xf2\x48\x39\x3d\x98\x1a\x6d\x0b\x9d\xba\x53\xfb\xce\xf8\xd1\x7e\xbb\x60\x4f\x06\xf5\xce\xda\xab\xeb\xca\xcb\xd5\xac\x20\xda\x72\x3b\xa2\x4b\x38\xd7\xb5\x89\xbe\x42\xd9\xb9\x73\xc4\x0c\x6d\xb7\xd9\xf8\x8d\xbd\x3e\x9c\xf5\x53\x68\x48\x14\x36\x8f\x09\xc5\x92\xf1\x21\xd1\x09\x07\x1c\xbe\xa7\x91\xf3\x6a\xc8\xc1\x57\xb0\xdd\xc5\xc6\x1d\xad\x76\x1d\xa8\x82\x0e\x4c\x38\xfe\xa5\x8c\xc5\x0a\x40\x5d\xa1\xbb\x98\xd1\xfb\x74\x61\xed\x1a\x98\xaf\x3c\x8c\x1e\xe3\xc2\x92\x29\x74\x3e\x99\xd0\xf9\x41\x50\xd0\x38\x4b\x57\x7e\x5b\x7a\x0e\xe6\xce\x4e\xd7\x19\x35\x57\xbb\x3c\x3c\xd2\x5e\x4f\x4b\x4c\xf7\x0f\x4d\x2b\x91\x5d\x94\xa6\x95\xc8\x69\x25\x72\x5a\x89\x7c\xb8\x95\xc8\x07\x80\x8c\xda\x9c\x64\x7b\xb7\x71\xdf\x57\x12\x4b\x9a\x1f\x72\x0c\x13\x03\xad\x3c\xd5\x4e\xde\x8e\x57\x13\x6d\x34\x86\xcf\x97\xe6\xa4\x68\xc4\xb0\xf6\xc9\xc2\xeb\x8d\x0b\xd7\xcd\xfe\xba\xa6\xf5\x30\xeb\x30\x33\xbe\xc7\x56\x27\xab\x08\xd9\x6d\xbb\x09\xee\x7c\x2d\xcf\xee\x87\x38\xac\xc8\xdd\x90\x9a\x58\x4a\x4e\x96\xa9\x79\x79\xf3\xde\x20\xf0\x96\xe3\x24\x19\xeb\xba\xf2\x53\x19\xab\x12\xaf\x47\xb3\xa0\x3e\xef\x9b\x8d\x6d\x6d\x7b\xde\x3b\x3b\x1a\xc0\x3f\x95\x7e\xed\x78\xfb\x76\xb8\xaf\xb3\xdd\xc5\xeb\x95\xed\x5a\x62\x41\x82\xb3\x54\x6e\x80\x4a\x92\x6f\x36\xbd\x34\xae\xde\x6f\xa4\xc0\xbc\x08\xe3\x84\xfc\x1d\xb6\xe3\xd0\x62\x38\x95\x9b\x57\xe7\x71\x12\x91\x80\xc8\x31\x69\x5e\x60\x21\x6e\x19\x0f\xc7\xa4\x79\x96\x28\x3e\x47\x54\x65\x41\x36\x08\x40\x88\x1f\x58\x08\x56\xaa\xd5\xbf\xaf\xad\x96\xd7\xd6\xcf\x87\xf5\x34\x0f\x71\x93\x6e\x26\xed\x98\x5b\x9f\x4f\xcf\x95\x34\xc6\xd7\x11\xfa\xb0\x81\x22\x1a\xdb\xdf\x8e\xdc\xc3\xb9\xf8\xdd\x5d\x3c\x74\xe6\xea\xb7\x8b\xbf\xf5\x6e\xb3\x46\x2e\x64\xf4\xab\x3c\x4e\xcf\x36\x1d\xfe\xfa\xb8\x36\xba\x8a\xd8\xad\xf6\xc6\x41\x2a\x37\x8c\x17\x0f\xda\xfe\xda\xe7\x65\xbc\x71\x2c\x36\x57\x8a\x47\x12\x4c\xf1\xbd\x77\x6b\xa4\x50\x7e\x77\x7b\x22\x60\x89\xef\xcd\xf5\xb9\x0c\x97\x79\x0d\x2b\x35\x43\xcb\x3d\x24\xf1\x78\xfc\xf8\xcb\x1f\x15\x06\xe2\x78\xd8\x51\x21\xd9\x1f\xf0\xf5\x8f\x86\xa4\x50\xfa\xb1\x47\x43\xa5\xdd\x69\x14\xe8\xa3\xc0\x86\x91\xa7\x81\x50\xb4\x7c\xc0\x81\x80\x77\x7a\x9f\xc6\xc2\xa9\x8c\x05\x33\xb0\x3b\x31\xa4\xf4\xd7\x1b\x26\x55\x97\x7c\x65\xf8\x69\x1a\x84\x8e\x41\x78\xd9\xec\xc5\x11\x16\x1e\x74\x91\xf5\x56\xf5\x57\x49\x47\x5c\x92\xa9\x1e\x99\x36\xf4\xdb\xb1\x0e\xd3\x78\x02\xb0\x9b\x25\xcb\xe9\xe9\x1d\x0d\x44\x01\x42\x08\x91\x64\xd9\xdd\x37\x08\x17\xef\xf9\xe5\x0f\xbd\x46\x91\xf5\xf9\x89\x92\x37\xdd\x89\x59\x44\x1f\x9c\xee\x34\x1e\xbe\x47\x83\x32\x72\x8e\x37\xdf\xac\x69\x38\xef\x75\xb0\xda\xdb\xac\x83\x94\x2f\x39\xa6\x62\x05\x1c\x25\x9c\x49\x16\xb0\xa8\x3c\xc7\x7e\x76\x71\x3e\x6f\xb5\x24\xe7\xe8\xb7\xb9\xc7\x6c\x43\x92\xee\x21\xd4\x17\xa1\x7f\xba\x35\xfe\xae\x39\xbc\xde\xba\x69\xd9\x8e\xe1\x62\xde\x64\x7d\x16\x88\x1b\xed\x29\x11\xfd\x4f\xa9\xff\x99\x90\xc4\xf6\xf4\xf9\x6e\xe9\x28\x23\xd7\xca\xe5\xee\xee\x9f\x63\xb1\x5b\xfb\x10\xd7\x2f\x1d\xf2\xe3\xbf\xb9\xb5\x6f\xa4\x6d\x7d\x25\x79\xfb\x24\x31\xea\x56\xbe\x5d\x53\xcd\x2d\x36\xa3\x6d\xdf\xab\x1c\xb8\x6d\x6f\xc0\x98\xa7\xdd\xaa\x86\x8c\x1d\x39\xa3\x9d\x70\x2b\x9b\x68\xd9\xfd\x33\xfe\xa9\xb6\x4a\x2e\x63\x0f\xcf\x68\x27\xd9\x4c\xb9\x46\x6d\xcb\xbe\xa1\xa8\xd6\x5f\xc6\xd6\x9f\xf1\x4f\xf4\xd4\xb4\x78\xd0\xd6\xf4\x13\x3c\x3b\xac\xd0\xdc\x90\x34\xda\xc9\xb4\x9a\x1a\x8d\xbd\x93\x87\xd4\xe2\x21\x1b\xb3\x2b\xd1\xbe\xe7\x69\xd4\x53\x67\xd5\x40\xa0\xe3\x19\x3f\x6d\x1a\xbc\x0e\x86\x3c\x10\xb4\x3d\x2a\xcd\x78\x32\xe6\xab\xbd\x36\xc9\xf4\x3a\x58\xae\xc3\xf4\x47\xea\xbf\xfb\x47\xff\x0d\x00\x00\xff\xff\xd2\x32\x5a\x28\x38\x9d\x00\x00") + +func 
v2SchemaJsonBytes() ([]byte, error) { + return bindataRead( + _v2SchemaJson, + "v2/schema.json", + ) +} + +func v2SchemaJson() (*asset, error) { + bytes, err := v2SchemaJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(0640), modTime: time.Unix(1568964748, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x88, 0x5e, 0xf, 0xbf, 0x17, 0x74, 0x0, 0xb2, 0x5a, 0x7f, 0xbc, 0x58, 0xcd, 0xc, 0x25, 0x73, 0xd5, 0x29, 0x1c, 0x7a, 0xd0, 0xce, 0x79, 0xd4, 0x89, 0x31, 0x27, 0x90, 0xf2, 0xff, 0xe6}} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. 
+var _bindata = map[string]func() (*asset, error){ + "jsonschema-draft-04.json": jsonschemaDraft04Json, + + "v2/schema.json": v2SchemaJson, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "jsonschema-draft-04.json": {jsonschemaDraft04Json, map[string]*bintree{}}, + "v2": {nil, map[string]*bintree{ + "schema.json": {v2SchemaJson, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory. +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) +} diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 00000000000..122993b44b4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,98 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
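// A hypothetical sketch (not part of the vendored source) of the asset API
// generated by go-bindata in bindata.go above: Asset returns the embedded
// bytes, AssetNames lists the embedded files, and AssetDir lists the entries
// below an embedded directory.
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	// The Swagger 2.0 meta-schema is embedded as v2/schema.json
	// (40248 bytes once decompressed, per the bindataFileInfo above).
	raw, err := spec.Asset("v2/schema.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(raw))

	// Both embedded files, in no particular order.
	fmt.Println(spec.AssetNames()) // jsonschema-draft-04.json, v2/schema.json

	// Children of the embedded v2 directory.
	entries, err := spec.AssetDir("v2")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(entries) // [schema.json]
}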
+ +package spec + +import ( + "sync" +) + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]interface{} +} + +func (s *simpleCache) ShallowClone() ResolutionCache { + store := make(map[string]interface{}, len(s.store)) + s.lock.RLock() + for k, v := range s.store { + store[k] = v + } + s.lock.RUnlock() + + return &simpleCache{ + store: store, + } +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (interface{}, bool) { + s.lock.RLock() + v, ok := s.store[uri] + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var ( + // resCache is a package level cache for $ref resolution and expansion. + // It is initialized lazily by methods that have the need for it: no + // memory is allocated unless some expander methods are called. + // + // It is initialized with JSON schema and swagger schema, + // which do not mutate during normal operations. + // + // All subsequent utilizations of this cache are produced from a shallow + // clone of this initial version. + resCache *simpleCache + onceCache sync.Once + + _ ResolutionCache = &simpleCache{} +) + +// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. +func initResolutionCache() { + resCache = defaultResolutionCache() +} + +func defaultResolutionCache() *simpleCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} + +func cacheOrDefault(cache ResolutionCache) ResolutionCache { + onceCache.Do(initResolutionCache) + + if cache != nil { + return cache + } + + // get a shallow clone of the base cache with swagger and json schema + return resCache.ShallowClone() +} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 00000000000..2f7bb219b56 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,57 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// ContactInfo contact information for the exposed API. 
+// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + ContactInfoProps + VendorExtensible +} + +// ContactInfoProps hold the properties of a ContactInfo object +type ContactInfoProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} + +// UnmarshalJSON hydrates ContactInfo from json +func (c *ContactInfo) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { + return err + } + return json.Unmarshal(data, &c.VendorExtensible) +} + +// MarshalJSON produces ContactInfo as json +func (c ContactInfo) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(c.ContactInfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(c.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go new file mode 100644 index 00000000000..fc889f6d0b0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "log" + "os" + "path" + "runtime" +) + +// Debug is true when the SWAGGER_DEBUG env var is not empty. +// +// It enables a more verbose logging of this package. +var Debug = os.Getenv("SWAGGER_DEBUG") != "" + +var ( + // specLogger is a debug logger for this package + specLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go new file mode 100644 index 00000000000..6992c7ba730 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/errors.go @@ -0,0 +1,19 @@ +package spec + +import "errors" + +// Error codes +var ( + // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type + ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") + + // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer + ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") + + // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. 
+ // At the moment, $ref are supported only inside: schemas, parameters, responses, path items + ErrDerefUnsupportedType = errors.New("deref: unsupported type") + + // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type + ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") +) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 00000000000..d4ea889d44d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,594 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" +) + +// ExpandOptions provides options for the spec expander. +// +// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. +// +// If left empty, the root document is assumed to be located in the current working directory: +// all relative $ref's will be resolved from there. +// +// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. +// +type ExpandOptions struct { + RelativeBase string // the path to the root document to expand. 
This is a file, not a directory
+	SkipSchemas bool // do not expand schemas, just paths, parameters and responses
+	ContinueOnError bool // continue expanding even after an error is found
+	PathLoader func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document
+	AbsoluteCircularRef bool // circular $ref remaining after expansion remain absolute URLs
+}
+
+func optionsOrDefault(opts *ExpandOptions) *ExpandOptions {
+	if opts != nil {
+		clone := *opts // shallow clone to avoid internal changes to be propagated to the caller
+		if clone.RelativeBase != "" {
+			clone.RelativeBase = normalizeBase(clone.RelativeBase)
+		}
+		// if the relative base is empty, let the schema loader choose a pseudo root document
+		return &clone
+	}
+	return &ExpandOptions{}
+}
+
+// ExpandSpec expands the references in a swagger spec
+func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
+	options = optionsOrDefault(options)
+	resolver := defaultSchemaLoader(spec, options, nil, nil)
+
+	specBasePath := options.RelativeBase
+
+	if !options.SkipSchemas {
+		for key, definition := range spec.Definitions {
+			parentRefs := make([]string, 0, 10)
+			parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key))
+
+			def, err := expandSchema(definition, parentRefs, resolver, specBasePath)
+			if resolver.shouldStopOnError(err) {
+				return err
+			}
+			if def != nil {
+				spec.Definitions[key] = *def
+			}
+		}
+	}
+
+	for key := range spec.Parameters {
+		parameter := spec.Parameters[key]
+		if err := expandParameterOrResponse(&parameter, resolver, specBasePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		spec.Parameters[key] = parameter
+	}
+
+	for key := range spec.Responses {
+		response := spec.Responses[key]
+		if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		spec.Responses[key] = response
+	}
+
+	if spec.Paths != nil {
+		for key := range spec.Paths.Paths {
+			pth := spec.Paths.Paths[key]
+			if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) {
+				return err
+			}
+			spec.Paths.Paths[key] = pth
+		}
+	}
+
+	return nil
+}
+
+const rootBase = ".root"
+
+// baseForRoot loads in the cache the root document and produces a fake ".root" base path entry
+// for further $ref resolution
+//
+// Setting the cache is optional and this parameter may safely be left to nil.
+func baseForRoot(root interface{}, cache ResolutionCache) string {
+	if root == nil {
+		return ""
+	}
+
+	// cache the root document to resolve $ref's
+	normalizedBase := normalizeBase(rootBase)
+	cache.Set(normalizedBase, root)
+
+	return normalizedBase
+}
+
+// ExpandSchema expands the refs in the schema object with reference to the root object.
+//
+// go-openapi/validate uses this function.
+//
+// Notice that it is impossible to reference a json schema in a different document other than root
+// (use ExpandSchemaWithBasePath to resolve external references).
+//
+// Setting the cache is optional and this parameter may safely be left to nil.
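// A hypothetical sketch (not part of the vendored source) of expanding all
// $ref's in a spec with ExpandSpec and the ExpandOptions defined above. The
// file name ./swagger.json and the error handling are illustrative.
package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/go-openapi/spec"
)

func main() {
	raw, err := os.ReadFile("./swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	var sw spec.Swagger
	if err := json.Unmarshal(raw, &sw); err != nil {
		log.Fatal(err)
	}

	// RelativeBase points at the root document itself (a file, not a
	// directory), so relative $ref's resolve against its location.
	opts := &spec.ExpandOptions{
		RelativeBase:    "./swagger.json",
		SkipSchemas:     false, // also expand the definitions section
		ContinueOnError: false, // stop at the first resolution error
	}
	if err := spec.ExpandSpec(&sw, opts); err != nil {
		log.Fatal(err)
	}
	// sw now has its $ref's expanded in place.
}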
+func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + if root == nil { + root = schema + } + + opts := &ExpandOptions{ + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + } + + return ExpandSchemaWithBasePath(schema, cache, opts) +} + +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. +// +// Setting the cache is optional and this parameter may safely be left to nil. +func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { + if schema == nil { + return nil + } + + cache = cacheOrDefault(cache) + + opts = optionsOrDefault(opts) + + resolver := defaultSchemaLoader(nil, opts, cache, nil) + + parentRefs := make([]string, 0, 10) + s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) + if err != nil { + return err + } + if s != nil { + // guard for when continuing on error + *schema = *s + } + + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Items == nil { + return &target, nil + } + + // array + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + + // tuple + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Ref.String() == "" && target.Ref.IsRoot() { + newRef := normalizeRef(&target.Ref, basePath) + target.Ref = *newRef + return &target, nil + } + + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's + if target.ID != "" { + basePath, _ = resolver.setSchemaID(target, target.ID, basePath) + } + + if target.Ref.String() != "" { + return expandSchemaRef(target, parentRefs, resolver, basePath) + } + + for k := range target.Definitions { + tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if tt != nil { + target.Definitions[k] = *tt + } + } + + t, err := expandItems(target, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target = *t + } + + for i := range target.AllOf { + t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AllOf[i] = *t + } + } + + for i := range target.AnyOf { + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AnyOf[i] = *t + } + } + + for i := range target.OneOf { + t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t + } + } + + if target.Not != nil { + t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + 
} + if t != nil { + *target.Not = *t + } + } + + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Properties[k] = *t + } + } + + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t + } + } + + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t + } + } + + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t + } + } + } + + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t + } + } + return &target, nil +} + +func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + // if a Ref is found, all sibling fields are skipped + // Ref also changes the resolution scope of children expandSchema + + // here the resolution scope is changed because a $ref was encountered + normalizedRef := normalizeRef(&target.Ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. We leave them as ref. 
+ // - denormalization means that a new local file ref is set relative to the original basePath + debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", + basePath, normalizedBasePath, normalizedRef.String()) + if !resolver.options.AbsoluteCircularRef { + target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID) + } else { + target.Ref = *normalizedRef + } + return &target, nil + } + + var t *Schema + err := resolver.Resolve(&target.Ref, &t, basePath) + if resolver.shouldStopOnError(err) { + return nil, err + } + + if t == nil { + // guard for when continuing on error + return &target, nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + transitiveResolver := resolver.transitiveResolver(basePath, target.Ref) + + basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) + + return expandSchema(*t, parentRefs, transitiveResolver, basePath) +} + +func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { + if pathItem == nil { + return nil + } + + parentRefs := make([]string, 0, 10) + if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + if pathItem.Ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref) + basePath = transitiveResolver.updateBasePath(resolver, basePath) + resolver = transitiveResolver + } + + pathItem.Ref = Ref{} + for i := range pathItem.Parameters { + if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + + ops := []*Operation{ + pathItem.Get, + pathItem.Head, + pathItem.Options, + pathItem.Put, + pathItem.Post, + pathItem.Patch, + pathItem.Delete, + } + for _, op := range ops { + if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + + return nil +} + +func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { + if op == nil { + return nil + } + + for i := range op.Parameters { + param := op.Parameters[i] + if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + op.Parameters[i] = param + } + + if op.Responses == nil { + return nil + } + + responses := op.Responses + if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + + for code := range responses.StatusCodeResponses { + response := responses.StatusCodeResponses[code] + if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + responses.StatusCodeResponses[code] = response + } + + return nil +} + +// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandResponse to resolve external references). +// +// Setting the cache is optional and this parameter may safely be left to nil. 
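+//
+// Editor's sketch (illustrative only, not upstream documentation), assuming a
+// root document already unmarshaled into a Swagger value, with a hypothetical
+// named response "NotFound":
+//
+//	var root Swagger // assumed already populated
+//	resp := root.Responses["NotFound"]
+//	if err := ExpandResponseWithRoot(&resp, &root, nil); err != nil {
+//		// handle the expansion error
+//	}
+//	root.Responses["NotFound"] = resp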
+func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandResponse expands a response based on a basepath +// +// All refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandParameter to resolve external references). +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ( + ref *Ref + sch *Schema + ) + + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) + } + + return ref, sch, nil +} + +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, _, err := getRefAndSchema(input) + if err != nil { + return err + } + + if ref == nil { + return nil + } + + parentRefs := make([]string, 0, 10) + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + ref, sch, _ := getRefAndSchema(input) + if ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, *ref) + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver + } + + if sch == nil { + // nothing to be expanded + if ref != nil { + *ref = Ref{} + } + return nil + } + + if sch.Ref.String() != "" { + rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) + if ern != nil { + return ern + } + + switch { + case resolver.isCircular(&rebasedRef, basePath, parentRefs...): + // this is a circular $ref: stop expansion + if !resolver.options.AbsoluteCircularRef { + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } else { + sch.Ref = rebasedRef + } + case !resolver.options.SkipSchemas: + // schema expanded to a $ref in another root + sch.Ref = 
rebasedRef + debugLog("rebased to: %s", sch.Ref.String()) + default: + // skip schema expansion but rebase $ref to schema + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } + } + + if ref != nil { + *ref = Ref{} + } + + // expand schema + if !resolver.options.SkipSchemas { + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + if s == nil { + // guard for when continuing on error + return nil + } + *sch = *s + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 00000000000..88add91b2b8 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 00000000000..9dfd17b185f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,203 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
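+
+// Editor's note (illustrative sketch, not upstream documentation): the Header
+// type defined below exposes a fluent builder API. A response header for a
+// rate-limit counter might be assembled as:
+//
+//	h := ResponseHeader().
+//		WithDescription("remaining requests in the current window").
+//		Typed("integer", "int32").
+//		WithMinimum(0, false)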
+ +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonArray = "array" +) + +// HeaderProps describes a response header +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + VendorExtensible + HeaderProps +} + +// ResponseHeader creates a new header instance for use in a response +func ResponseHeader() *Header { + return new(Header) +} + +// WithDescription sets the description on this response, allows for chaining +func (h *Header) WithDescription(description string) *Header { + h.Description = description + return h +} + +// Typed a fluent builder method for the type of parameter +func (h *Header) Typed(tpe, format string) *Header { + h.Type = tpe + h.Format = format + return h +} + +// CollectionOf a fluent builder method for an array item +func (h *Header) CollectionOf(items *Items, format string) *Header { + h.Type = jsonArray + h.Items = items + h.CollectionFormat = format + return h +} + +// WithDefault sets the default value on this item +func (h *Header) WithDefault(defaultValue interface{}) *Header { + h.Default = defaultValue + return h +} + +// WithMaxLength sets a max length value +func (h *Header) WithMaxLength(max int64) *Header { + h.MaxLength = &max + return h +} + +// WithMinLength sets a min length value +func (h *Header) WithMinLength(min int64) *Header { + h.MinLength = &min + return h +} + +// WithPattern sets a pattern value +func (h *Header) WithPattern(pattern string) *Header { + h.Pattern = pattern + return h +} + +// WithMultipleOf sets a multiple of value +func (h *Header) WithMultipleOf(number float64) *Header { + h.MultipleOf = &number + return h +} + +// WithMaximum sets a maximum number value +func (h *Header) WithMaximum(max float64, exclusive bool) *Header { + h.Maximum = &max + h.ExclusiveMaximum = exclusive + return h +} + +// WithMinimum sets a minimum number value +func (h *Header) WithMinimum(min float64, exclusive bool) *Header { + h.Minimum = &min + h.ExclusiveMinimum = exclusive + return h +} + +// WithEnum sets a the enum values (replace) +func (h *Header) WithEnum(values ...interface{}) *Header { + h.Enum = append([]interface{}{}, values...) 
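+	// (appends into a fresh slice so the header does not alias the caller's values)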
+ return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// WithValidations is a fluent method to set header validations +func (h *Header) WithValidations(val CommonValidations) *Header { + h.SetValidations(SchemaValidations{CommonValidations: val}) + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +// JSONLookup look up a value by the json property name +func (h Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := h.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 00000000000..c458b49b216 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,165 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
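+
+// Editor's note (illustrative sketch, not upstream documentation): Extensions
+// and VendorExtensible below store "x-..." vendor extensions with lower-cased
+// keys, which makes lookups case-insensitive:
+//
+//	var v VendorExtensible
+//	v.AddExtension("x-origin", "generated")
+//	origin, ok := v.Extensions.GetString("X-Origin") // ok == true, origin == "generated"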
+
+package spec
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// Extensions vendor-specific extensions
+type Extensions map[string]interface{}
+
+// Add adds a value to these extensions
+func (e Extensions) Add(key string, value interface{}) {
+	realKey := strings.ToLower(key)
+	e[realKey] = value
+}
+
+// GetString gets a string value from the extensions
+func (e Extensions) GetString(key string) (string, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		str, ok := v.(string)
+		return str, ok
+	}
+	return "", false
+}
+
+// GetBool gets a bool value from the extensions
+func (e Extensions) GetBool(key string) (bool, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		str, ok := v.(bool)
+		return str, ok
+	}
+	return false, false
+}
+
+// GetStringSlice gets a string slice from the extensions
+func (e Extensions) GetStringSlice(key string) ([]string, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		arr, isSlice := v.([]interface{})
+		if !isSlice {
+			return nil, false
+		}
+		var strs []string
+		for _, iface := range arr {
+			str, isString := iface.(string)
+			if !isString {
+				return nil, false
+			}
+			strs = append(strs, str)
+		}
+		return strs, ok
+	}
+	return nil, false
+}
+
+// VendorExtensible composition block.
+type VendorExtensible struct {
+	Extensions Extensions
+}
+
+// AddExtension adds an extension to this extensible object
+func (v *VendorExtensible) AddExtension(key string, value interface{}) {
+	if value == nil {
+		return
+	}
+	if v.Extensions == nil {
+		v.Extensions = make(map[string]interface{})
+	}
+	v.Extensions.Add(key, value)
+}
+
+// MarshalJSON marshals the extensions to json
+func (v VendorExtensible) MarshalJSON() ([]byte, error) {
+	toser := make(map[string]interface{})
+	for k, v := range v.Extensions {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			toser[k] = v
+		}
+	}
+	return json.Marshal(toser)
+}
+
+// UnmarshalJSON for this extensible object
+func (v *VendorExtensible) UnmarshalJSON(data []byte) error {
+	var d map[string]interface{}
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+	for k, vv := range d {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			if v.Extensions == nil {
+				v.Extensions = map[string]interface{}{}
+			}
+			v.Extensions[k] = vv
+		}
+	}
+	return nil
+}
+
+// InfoProps the properties for an info definition
+type InfoProps struct {
+	Description    string       `json:"description,omitempty"`
+	Title          string       `json:"title,omitempty"`
+	TermsOfService string       `json:"termsOfService,omitempty"`
+	Contact        *ContactInfo `json:"contact,omitempty"`
+	License        *License     `json:"license,omitempty"`
+	Version        string       `json:"version,omitempty"`
+}
+
+// Info object provides metadata about the API.
+// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
+// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (interface{}, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 00000000000..e2afb2133b9 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,234 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// TypeName return the type (or format) of a simple schema +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +// ItemsTypeName yields the type of items in a simple schema array +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". +// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// AsNullable flags this schema as nullable. 
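+//
+// Editor's sketch (illustrative only): the fluent Items builders compose, e.g.
+// a nullable int32 element for a CSV-formatted array parameter:
+//
+//	elem := NewItems().Typed("integer", "int32").AsNullable()
+//	arr := NewItems().CollectionOf(elem, "csv")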
+func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = jsonArray + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue interface{}) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(max int64) *Items { + i.MaxLength = &max + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(min int64) *Items { + i.MinLength = &min + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(max float64, exclusive bool) *Items { + i.Maximum = &max + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(min float64, exclusive bool) *Items { + i.Minimum = &min + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets a the enum values (replace) +func (i *Items) WithEnum(values ...interface{}) *Items { + i.Enum = append([]interface{}{}, values...) + return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// WithValidations is a fluent method to set Items validations +func (i *Items) WithValidations(val CommonValidations) *Items { + i.SetValidations(SchemaValidations{CommonValidations: val}) + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b4, b3, b1, b2), nil +} + +// JSONLookup look up a value by the json property name +func (i Items) JSONLookup(token string) (interface{}, error) { + if 
token == jsonRef { + return &i.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 00000000000..b42f80368ec --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,56 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// License information for the exposed API. +// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + LicenseProps + VendorExtensible +} + +// LicenseProps holds the properties of a License object +type LicenseProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} + +// UnmarshalJSON hydrates License from json +func (l *License) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &l.LicenseProps); err != nil { + return err + } + return json.Unmarshal(data, &l.VendorExtensible) +} + +// MarshalJSON produces License as json +func (l License) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(l.LicenseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(l.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 00000000000..e8b60099457 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path" + "strings" +) + +const fileScheme = "file" + +// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. +// +// NOTE(windows): there is a tolerance over the strict URI format on windows. +// +// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like +// 'C:\Path\file.Yaml'. 
+//
+// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path:
+// 'file:///c:/path/file.yaml'
+//
+// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or
+// 'file:///c:\folder\File.json'.
+//
+// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair"
+// is attempted.
+//
+// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()).
+func normalizeURI(refPath, base string) string {
+	refURL, err := parseURL(refPath)
+	if err != nil {
+		specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err)
+		refURL, refPath = repairURI(refPath)
+	}
+
+	fixWindowsURI(refURL, refPath) // noop on non-windows OS
+
+	refURL.Path = path.Clean(refURL.Path)
+	if refURL.Path == "." {
+		refURL.Path = ""
+	}
+
+	r := MustCreateRef(refURL.String())
+	if r.IsCanonical() {
+		return refURL.String()
+	}
+
+	baseURL, _ := parseURL(base)
+	if path.IsAbs(refURL.Path) {
+		baseURL.Path = refURL.Path
+	} else if refURL.Path != "" {
+		baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path)
+	}
+	// copying fragment from ref to base
+	baseURL.Fragment = refURL.Fragment
+
+	return baseURL.String()
+}
+
+// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document.
+//
+// When calling this, we assume that:
+// * $ref is a canonical URI
+// * originalRelativeBase is a canonical URI
+//
+// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected.
+// In this case, expansion stops and normally renders the internal canonical $ref.
+//
+// This internal $ref is eventually rebased to the original RelativeBase used for the expansion.
+//
+// There is a special case for schemas that are anchored with an "id":
+// in that case, the rebasing is performed against the id only if this is an anchor for the initial root document.
+// All other intermediate "id"s found along the way are ignored for the purpose of rebasing.
+func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref {
+	debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id)
+
+	if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly {
+		// short circuit: $ref to current doc
+		return *ref
+	}
+
+	if id != "" {
+		idBaseURL, err := parseURL(id)
+		if err == nil { // if the schema id is not usable as a URI, ignore it
+			if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchanged (do not want $ref: "")
+				// $ref relative to the ID of the schema in the root document
+				return ref
+			}
+		}
+	}
+
+	originalRelativeBaseURL, _ := parseURL(originalRelativeBase)
+
+	r, _ := rebase(ref, originalRelativeBaseURL, false)
+
+	return r
+}
+
+func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) {
+	var newBase url.URL
+
+	u := ref.GetURL()
+
+	if u.Scheme != v.Scheme || u.Host != v.Host {
+		return *ref, false
+	}
+
+	docPath := v.Path
+	v.Path = path.Dir(v.Path)
+
+	if v.Path == "."
{ + v.Path = "" + } else if !strings.HasSuffix(v.Path, "/") { + v.Path += "/" + } + + newBase.Fragment = u.Fragment + + if strings.HasPrefix(u.Path, docPath) { + newBase.Path = strings.TrimPrefix(u.Path, docPath) + } else { + newBase.Path = strings.TrimPrefix(u.Path, v.Path) + } + + if notEqual && newBase.Path == "" && newBase.Fragment == "" { + // do not want rebasing to end up in an empty $ref + return *ref, false + } + + if path.IsAbs(newBase.Path) { + // whenever we end up with an absolute path, specify the scheme and host + newBase.Scheme = v.Scheme + newBase.Host = v.Host + } + + return MustCreateRef(newBase.String()), true +} + +// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor +func normalizeRef(ref *Ref, relativeBase string) *Ref { + r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) + return &r +} + +// normalizeBase performs a normalization of the input base path. +// +// This always yields a canonical URI (absolute), usable for the document cache. +// +// It ensures that all further internal work on basePath may safely assume +// a non-empty, cross-platform, canonical URI (i.e. absolute). +// +// This normalization tolerates windows paths (e.g. C:\x\y\File.dat) and transform this +// in a file:// URL with lower cased drive letter and path. +// +// See also: https://en.wikipedia.org/wiki/File_URI_scheme +func normalizeBase(in string) string { + u, err := parseURL(in) + if err != nil { + specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) + u, in = repairURI(in) + } + + u.Fragment = "" // any fragment in the base is irrelevant + + fixWindowsURI(u, in) // noop on non-windows OS + + u.Path = path.Clean(u.Path) + if u.Path == "." { // empty after Clean() + u.Path = "" + } + + if u.Scheme != "" { + if path.IsAbs(u.Path) || u.Scheme != fileScheme { + // this is absolute or explicitly not a local file: we're good + return u.String() + } + } + + // no scheme or file scheme with relative path: assume file and make it absolute + // enforce scheme file://... with absolute path. + // + // If the input path is relative, we anchor the path to the current working directory. + // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json + + u.Scheme = fileScheme + u.Path = absPath(u.Path) // platform-dependent + u.RawQuery = "" // any query component is irrelevant for a base + return u.String() +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go new file mode 100644 index 00000000000..2df0723154f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -0,0 +1,44 @@ +//go:build !windows +// +build !windows + +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path/filepath" +) + +// absPath makes a file path absolute and compatible with a URI path component. 
+//
+// The parameter must be a path, not an URI.
+func absPath(in string) string {
+	anchored, err := filepath.Abs(in)
+	if err != nil {
+		specLogger.Printf("warning: could not resolve current working directory: %v", err)
+		return in
+	}
+	return anchored
+}
+
+func repairURI(in string) (*url.URL, string) {
+	u, _ := parseURL("")
+	debugLog("repaired URI: original: %q, repaired: %q", in, "")
+	return u, ""
+}
+
+func fixWindowsURI(u *url.URL, in string) {
+}
diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go
new file mode 100644
index 00000000000..a66c532dbc6
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/normalizer_windows.go
@@ -0,0 +1,154 @@
+// +build windows
+
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+)
+
+// absPath makes a file path absolute and compatible with a URI path component.
+//
+// The parameter must be a path, not an URI.
+func absPath(in string) string {
+	// NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths.
+	// See https://github.com/golang/go/issues/24441
+	if in == "" {
+		in = "."
+	}
+
+	anchored, err := filepath.Abs(in)
+	if err != nil {
+		specLogger.Printf("warning: could not resolve current working directory: %v", err)
+		return in
+	}
+
+	pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`)
+	if !strings.HasPrefix(pth, "/") {
+		pth = "/" + pth
+	}
+
+	return path.Clean(pth)
+}
+
+// repairURI tolerates invalid file URIs with common typos, such as
+// 'file://E:\folder\file', which break the regular URL parser.
+//
+// Adopting the same defaults as for unixes (e.g. return an empty path) would
+// result in a counter-intuitive outcome for that case (e.g. E:\folder\file is
+// eventually resolved as the current directory). The repair will detect the missing "/".
+//
+// Note that this only works for the file scheme.
+func repairURI(in string) (*url.URL, string) {
+	const prefix = fileScheme + "://"
+	if !strings.HasPrefix(in, prefix) {
+		// giving up: resolve to empty path
+		u, _ := parseURL("")
+
+		return u, ""
+	}
+
+	// attempt the repair, stripping the scheme should be sufficient
+	u, _ := parseURL(strings.TrimPrefix(in, prefix))
+	debugLog("repaired URI: original: %q, repaired: %q", in, u.String())
+
+	return u, u.String()
+}
+
+// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml
+// and makes it a canonical URI: file:///c:/base/file.yaml
+//
+// Catch 22 notes for Windows:
+//
+// * There may be a drive letter on windows (it is lower-cased)
+// * There may be a share UNC, e.g. \\server\folder\data.xml
+// * Paths are case insensitive
+// * Paths may already contain slashes
+// * Paths must be slashed
+//
+// NOTE: there is no escaping. "/" may be valid separators just like "\".
+// We don't use ToSlash() (which escapes everything) because windows now also +// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. +func fixWindowsURI(u *url.URL, in string) { + drive := filepath.VolumeName(in) + + if len(drive) > 0 { + if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter + u.Scheme = fileScheme + u.Host = "" + u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) + } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume + // NOTE: the special host@port syntax for UNC is not supported (yet) + u.Scheme = fileScheme + + // this is a modified version of filepath.Dir() to apply on the VolumeName itself + i := len(drive) - 1 + for i >= 0 && !os.IsPathSeparator(drive[i]) { + i-- + } + host := drive[:i] // \\host\share => host + + u.Path = strings.TrimPrefix(u.Path, host) + u.Host = strings.TrimPrefix(host, `\\`) + } + + u.Opaque = "" + u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) + + // ensure we form an absolute path + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + + u.Path = path.Clean(u.Path) + + return + } + + if u.Scheme == fileScheme { + // Handle dodgy cases for file://{...} URIs on windows. + // A canonical URI should always be followed by an absolute path. + // + // Examples: + // * file:///folder/file => valid, unchanged + // * file:///c:\folder\file => slashed + // * file:///./folder/file => valid, cleaned to remove the dot + // * file:///.\folder\file => remapped to cwd + // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) + // * file:///.. => dodgy, remapped to / (consistent with the behavior on unix) + if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { + // ensure we form an absolute path + u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + } + u.Path = strings.ToLower(u.Path) + } + + // NOTE: lower case normalization does not propagate to inner resources, + // generated when rebasing: when joining a relative URI with a file to an absolute base, + // only the base is currently lower-cased. + // + // For now, we assume this is good enough for most use cases + // and try not to generate too many differences + // between the output produced on different platforms. + u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 00000000000..995ce6acb17 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,397 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
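+
+// Editor's note (illustrative sketch, not upstream documentation): Operation
+// below is typically assembled through its fluent builders. NewResponse() is
+// assumed here from this package's response builders:
+//
+//	op := NewOperation("listItems").
+//		WithSummary("lists the items").
+//		WithProduces("application/json").
+//		RespondsWith(200, NewResponse().WithDescription("the list of items")).
+//		WithDefaultResponse(NewResponse().WithDescription("unexpected error"))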
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "sort" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +func init() { + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + +// OperationProps describes an operation +// +// NOTES: +// - schemes, when present must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// MarshalJSON takes care of serializing operation properties to JSON +// +// We use a custom marhaller here to handle a special cases related to +// the Security field. We need to preserve zero length slice +// while omitting the field when the value is nil/unset. +func (op OperationProps) MarshalJSON() ([]byte, error) { + type Alias OperationProps + if op.Security == nil { + return json.Marshal(&struct { + Security []map[string][]string `json:"security,omitempty"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) + } + return json.Marshal(&struct { + Security []map[string][]string `json:"security"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +} + +// SuccessResponse gets a success response model +func (o *Operation) SuccessResponse() (*Response, int, bool) { + if o.Responses == nil { + return nil, 0, false + } + + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) + } + } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } + + return o.Responses.Default, 0, false +} + +// JSONLookup look up a value by the json property name +func (o Operation) JSONLookup(token string) (interface{}, error) { + if ex, ok := o.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(o.OperationProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// NewOperation creates a new operation instance. 
+// It expects an ID as parameter but not passing an ID is also valid. +func NewOperation(id string) *Operation { + op := new(Operation) + op.ID = id + return op +} + +// WithID sets the ID property on this operation, allows for chaining. +func (o *Operation) WithID(id string) *Operation { + o.ID = id + return o +} + +// WithDescription sets the description on this operation, allows for chaining +func (o *Operation) WithDescription(description string) *Operation { + o.Description = description + return o +} + +// WithSummary sets the summary on this operation, allows for chaining +func (o *Operation) WithSummary(summary string) *Operation { + o.Summary = summary + return o +} + +// WithExternalDocs sets/removes the external docs for/from this operation. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. +func (o *Operation) WithExternalDocs(description, url string) *Operation { + if description == "" && url == "" { + o.ExternalDocs = nil + return o + } + + if o.ExternalDocs == nil { + o.ExternalDocs = &ExternalDocumentation{} + } + o.ExternalDocs.Description = description + o.ExternalDocs.URL = url + return o +} + +// Deprecate marks the operation as deprecated +func (o *Operation) Deprecate() *Operation { + o.Deprecated = true + return o +} + +// Undeprecate marks the operation as not deprected +func (o *Operation) Undeprecate() *Operation { + o.Deprecated = false + return o +} + +// WithConsumes adds media types for incoming body values +func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { + o.Consumes = append(o.Consumes, mediaTypes...) + return o +} + +// WithProduces adds media types for outgoing body values +func (o *Operation) WithProduces(mediaTypes ...string) *Operation { + o.Produces = append(o.Produces, mediaTypes...) + return o +} + +// WithTags adds tags for this operation +func (o *Operation) WithTags(tags ...string) *Operation { + o.Tags = append(o.Tags, tags...) + return o +} + +// AddParam adds a parameter to this operation, when a parameter for that location +// and with that name already exists it will be replaced +func (o *Operation) AddParam(param *Parameter) *Operation { + if param == nil { + return o + } + + for i, p := range o.Parameters { + if p.Name == param.Name && p.In == param.In { + params := append(o.Parameters[:i], *param) + params = append(params, o.Parameters[i+1:]...) + o.Parameters = params + return o + } + } + + o.Parameters = append(o.Parameters, *param) + return o +} + +// RemoveParam removes a parameter from the operation +func (o *Operation) RemoveParam(name, in string) *Operation { + for i, p := range o.Parameters { + if p.Name == name && p.In == in { + o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) + return o + } + } + return o +} + +// SecuredWith adds a security scope to this operation. +func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { + o.Security = append(o.Security, map[string][]string{name: scopes}) + return o +} + +// WithDefaultResponse adds a default response to the operation. +// Passing a nil value will remove the response +func (o *Operation) WithDefaultResponse(response *Response) *Operation { + return o.RespondsWith(0, response) +} + +// RespondsWith adds a status code response to the operation. 
+// When the code is 0 the value of the response will be used as default response value. +// When the value of the response is nil it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) 
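+				// (copies the decoded scope list into a fresh slice for the rebuilt security requirement)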
+ } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 00000000000..2b2b89b67bf --- /dev/null +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,326 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter, this is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter, this is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} +} + +// FormDataParam creates a body parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a body parameter +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +} + +// ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types. 
+// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// * Header - Custom headers that are expected as part of the request. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = jsonArray + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false + return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(max int64) *Parameter { + p.MaxLength = &max + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(min int64) *Parameter { + p.MinLength = &min + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { + p.Maximum = &max + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter { + p.Minimum = &min + p.ExclusiveMinimum = exclusive + return p +} + +// WithEnum sets a the enum values (replace) +func (p *Parameter) WithEnum(values ...interface{}) *Parameter { + p.Enum = append([]interface{}{}, values...) + return p +} + +// WithMaxItems sets the max items +func (p *Parameter) WithMaxItems(size int64) *Parameter { + p.MaxItems = &size + return p +} + +// WithMinItems sets the min items +func (p *Parameter) WithMinItems(size int64) *Parameter { + p.MinItems = &size + return p +} + +// UniqueValues dictates that this array can only have unique items +func (p *Parameter) UniqueValues() *Parameter { + p.UniqueItems = true + return p +} + +// AllowDuplicates this array can have duplicates +func (p *Parameter) AllowDuplicates() *Parameter { + p.UniqueItems = false + return p +} + +// WithValidations is a fluent method to set parameter validations +func (p *Parameter) WithValidations(val CommonValidations) *Parameter { + p.SetValidations(SchemaValidations{CommonValidations: val}) + return p +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.ParamProps) +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2, b4, b5), nil +} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go new file mode 100644 index 00000000000..68fc8e90144 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -0,0 +1,87 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
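
The fluent builders above make Parameter construction chainable. A minimal sketch of typical usage, assuming only the vendored `github.com/go-openapi/spec` package (the parameter name and bounds are invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// An optional, range-validated integer query parameter.
	limit := spec.QueryParam("limit").
		Typed("integer", "int32").
		WithDefault(20). // WithDefault implies AsOptional()
		WithMinimum(1, false).
		WithMaximum(100, false)

	out, err := json.MarshalIndent(limit, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```

Note that once a default is set, `AsRequired` is a no-op: per the builder code above, a parameter with a default cannot be flagged as required.
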
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// PathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. +// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.PathItemProps) +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 00000000000..9dc82a2901d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,97 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
+// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (interface{}, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go new file mode 100644 index 00000000000..2af13787ab1 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -0,0 +1,91 @@ +package spec + +import ( + "bytes" + "encoding/json" + "reflect" + "sort" +) + +// OrderSchemaItem holds a named schema (e.g. from a property of an object) +type OrderSchemaItem struct { + Name string + Schema +} + +// OrderSchemaItems is a sortable slice of named schemas. +// The ordering is defined by the x-order schema extension. +type OrderSchemaItems []OrderSchemaItem + +// MarshalJSON produces a json object with keys defined by the name schemas +// of the OrderSchemaItems slice, keeping the original order of the slice. 
+func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(nil) + buf.WriteString("{") + for i := range items { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString("\"") + buf.WriteString(items[i].Name) + buf.WriteString("\":") + bs, err := json.Marshal(&items[i].Schema) + if err != nil { + return nil, err + } + buf.Write(bs) + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +func (items OrderSchemaItems) Len() int { return len(items) } +func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } +func (items OrderSchemaItems) Less(i, j int) (ret bool) { + ii, oki := items[i].Extensions.GetString("x-order") + ij, okj := items[j].Extensions.GetString("x-order") + if oki { + if okj { + defer func() { + if err := recover(); err != nil { + defer func() { + if err = recover(); err != nil { + ret = items[i].Name < items[j].Name + } + }() + ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() + } + }() + return reflect.ValueOf(ii).Int() < reflect.ValueOf(ij).Int() + } + return true + } else if okj { + return false + } + return items[i].Name < items[j].Name +} + +// SchemaProperties is a map representing the properties of a Schema object. +// It knows how to transform its keys into an ordered slice. +type SchemaProperties map[string]Schema + +// ToOrderedSchemaItems transforms the map of properties into a sortable slice +func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { + items := make(OrderSchemaItems, 0, len(properties)) + for k, v := range properties { + items = append(items, OrderSchemaItem{ + Name: k, + Schema: v, + }) + } + sort.Sort(items) + return items +} + +// MarshalJSON produces properties as json, keeping their order. +func (properties SchemaProperties) MarshalJSON() ([]byte, error) { + if properties == nil { + return []byte("null"), nil + } + return json.Marshal(properties.ToOrderedSchemaItems()) +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 00000000000..b0ef9bd9c9b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,193 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
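
The sort above honors the `x-order` vendor extension and falls back to lexical ordering by property name. Because `Extensions.GetString` only matches string values (and the reflect-based integer comparison recovers into a string compare), the `x-order` values in this sketch are given as strings; the property names are invented:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	id := *spec.Int64Property()
	id.Extensions = map[string]interface{}{"x-order": "0"}

	name := *spec.StringProperty()
	name.Extensions = map[string]interface{}{"x-order": "1"}

	props := spec.SchemaProperties{"name": name, "id": id}

	out, err := json.Marshal(props)
	if err != nil {
		panic(err)
	}
	// "id" is emitted before "name" regardless of map iteration order.
	fmt.Println(string(out))
}
```
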
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"net/http"
+	"os"
+	"path/filepath"
+
+	"github.com/go-openapi/jsonreference"
+)
+
+// Refable is a struct for things that accept a $ref property
+type Refable struct {
+	Ref Ref
+}
+
+// MarshalJSON marshals the ref to json
+func (r Refable) MarshalJSON() ([]byte, error) {
+	return r.Ref.MarshalJSON()
+}
+
+// UnmarshalJSON unmarshals the ref from json
+func (r *Refable) UnmarshalJSON(d []byte) error {
+	return json.Unmarshal(d, &r.Ref)
+}
+
+// Ref represents a json reference that is potentially resolved
+type Ref struct {
+	jsonreference.Ref
+}
+
+// RemoteURI gets the remote uri part of the ref
+func (r *Ref) RemoteURI() string {
+	if r.String() == "" {
+		return ""
+	}
+
+	u := *r.GetURL()
+	u.Fragment = ""
+	return u.String()
+}
+
+// IsValidURI returns true when the URL the ref points to can be found
+func (r *Ref) IsValidURI(basepaths ...string) bool {
+	if r.String() == "" {
+		return true
+	}
+
+	v := r.RemoteURI()
+	if v == "" {
+		return true
+	}
+
+	if r.HasFullURL {
+		//nolint:noctx,gosec
+		rr, err := http.Get(v)
+		if err != nil {
+			return false
+		}
+		defer rr.Body.Close()
+
+		return rr.StatusCode/100 == 2
+	}
+
+	if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
+		return false
+	}
+
+	// check for local file
+	pth := v
+	if r.HasURLPathOnly {
+		base := "."
+		if len(basepaths) > 0 {
+			base = filepath.Dir(filepath.Join(basepaths...))
+		}
+		p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth)))
+		if e != nil {
+			return false
+		}
+		pth = p
+	}
+
+	fi, err := os.Stat(filepath.ToSlash(pth))
+	if err != nil {
+		return false
+	}
+
+	return !fi.IsDir()
+}
+
+// Inherits creates a new reference from a parent and a child.
+// If the child cannot inherit from the parent, an error is returned.
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+	ref, err := r.Ref.Inherits(child.Ref)
+	if err != nil {
+		return nil, err
+	}
+	return &Ref{Ref: *ref}, nil
+}
+
+// NewRef creates a new instance of a ref object.
+// It returns an error when the reference URI is invalid.
+func NewRef(refURI string) (Ref, error) {
+	ref, err := jsonreference.New(refURI)
+	if err != nil {
+		return Ref{}, err
+	}
+	return Ref{Ref: ref}, nil
+}
+
+// MustCreateRef creates a ref object but panics when refURI is invalid.
+// Use the NewRef method for a version that returns an error.
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + +func (r *Ref) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go new file mode 100644 index 00000000000..47d1ee13fc7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/resolver.go @@ -0,0 +1,127 @@ +package spec + +import ( + "fmt" + + "github.com/go-openapi/swag" +) + +func resolveAnyWithBase(root interface{}, ref *Ref, result interface{}, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(root, options, nil, nil) + + if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { + return err + } + + return nil +} + +// ResolveRefWithBase resolves a reference against a context root with preservation of base path +func ResolveRefWithBase(root interface{}, ref *Ref, options *ExpandOptions) (*Schema, error) { + result := new(Schema) + + if err := resolveAnyWithBase(root, ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveRef resolves a reference for a schema against a context root +// ref is guaranteed to be in root (no need to go to external files) +// +// ResolveRef is ONLY called from the code generation module +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + res, _, err := ref.GetPointer().Get(root) + if err != nil { + return nil, err + } + + switch sch := res.(type) { + case Schema: + return &sch, nil + case *Schema: + return sch, nil + case map[string]interface{}: + newSch := new(Schema) + if err = swag.DynamicJSONToStruct(sch, newSch); err != nil { + return nil, err + } + return newSch, nil + default: + return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference) + } +} + +// ResolveParameterWithBase resolves a parameter reference against a context root and base path +func ResolveParameterWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Parameter, error) { + result := new(Parameter) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveParameter resolves a parameter reference against a context root +func ResolveParameter(root 
interface{}, ref Ref) (*Parameter, error) {
+	return ResolveParameterWithBase(root, ref, nil)
+}
+
+// ResolveResponseWithBase resolves a response reference against a context root and base path
+func ResolveResponseWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Response, error) {
+	result := new(Response)
+
+	err := resolveAnyWithBase(root, &ref, result, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveResponse resolves a response reference against a context root
+func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
+	return ResolveResponseWithBase(root, ref, nil)
+}
+
+// ResolvePathItemWithBase resolves a path item reference against a context root and base path
+func ResolvePathItemWithBase(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
+	result := new(PathItem)
+
+	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolvePathItem resolves a path item reference against a context root and base path
+//
+// Deprecated: use ResolvePathItemWithBase instead
+func ResolvePathItem(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
+	return ResolvePathItemWithBase(root, ref, options)
+}
+
+// ResolveItemsWithBase resolves a parameter items reference against a context root and base path.
+//
+// NOTE: strictly speaking, this construct is not supported by Swagger 2.0.
+// Similarly, $ref is forbidden in response headers.
+func ResolveItemsWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
+	result := new(Items)
+
+	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveItems resolves a parameter items reference against a context root and base path.
+//
+// Deprecated: use ResolveItemsWithBase instead
+func ResolveItems(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
+	return ResolveItemsWithBase(root, ref, options)
+}
diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go
new file mode 100644
index 00000000000..0340b60d845
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/response.go
@@ -0,0 +1,152 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// ResponseProps properties specific to a response
+type ResponseProps struct {
+	Description string                 `json:"description"`
+	Schema      *Schema                `json:"schema,omitempty"`
+	Headers     map[string]Header      `json:"headers,omitempty"`
+	Examples    map[string]interface{} `json:"examples,omitempty"`
+}
+
+// Response describes a single response from an API Operation.
+// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps + VendorExtensible +} + +// JSONLookup look up a value by the json property name +func (r Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &r.Ref, nil + } + ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) + return ptr, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + return json.Unmarshal(data, &r.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if r.Ref.String() == "" { + // when there is no $ref, empty description is rendered as an empty string + b1, err = json.Marshal(r.ResponseProps) + } else { + // when there is $ref inside the schema, description should be omitempty-ied + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` + }{ + Description: r.ResponseProps.Description, + Schema: r.ResponseProps.Schema, + Examples: r.ResponseProps.Examples, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} + +// WithDescription sets the description on this response, allows for chaining +func (r *Response) WithDescription(description string) *Response { + r.Description = description + return r +} + +// WithSchema sets the schema on this response, allows for chaining. 
+// Passing a nil argument removes the schema from this response
+func (r *Response) WithSchema(schema *Schema) *Response {
+	r.Schema = schema
+	return r
+}
+
+// AddHeader adds a header to this response
+func (r *Response) AddHeader(name string, header *Header) *Response {
+	if header == nil {
+		return r.RemoveHeader(name)
+	}
+	if r.Headers == nil {
+		r.Headers = make(map[string]Header)
+	}
+	r.Headers[name] = *header
+	return r
+}
+
+// RemoveHeader removes a header from this response
+func (r *Response) RemoveHeader(name string) *Response {
+	delete(r.Headers, name)
+	return r
+}
+
+// AddExample adds an example to this response
+func (r *Response) AddExample(mediaType string, example interface{}) *Response {
+	if r.Examples == nil {
+		r.Examples = make(map[string]interface{})
+	}
+	r.Examples[mediaType] = example
+	return r
+}
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
new file mode 100644
index 00000000000..4efb6f868bd
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/responses.go
@@ -0,0 +1,127 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"github.com/go-openapi/swag"
+)
+
+// Responses is a container for the expected responses of an operation.
+// The container maps an HTTP response code to the expected response.
+// The documentation is not expected to cover every possible HTTP response code,
+// since these may not be known in advance. However, it is expected to cover
+// a successful operation response and any known errors.
+//
+// The `default` can be used as a default response object for all HTTP codes that
+// are not covered individually by the specification.
+//
+// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
+// for a successful operation call.
+//
+// For more information: http://goo.gl/8us55a#responsesObject
+type Responses struct {
+	VendorExtensible
+	ResponsesProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (r Responses) JSONLookup(token string) (interface{}, error) {
+	if token == "default" {
+		return r.Default, nil
+	}
+	if ex, ok := r.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if i, err := strconv.Atoi(token); err == nil {
+		if scr, ok := r.StatusCodeResponses[i]; ok {
+			return scr, nil
+		}
+	}
+	return nil, fmt.Errorf("object has no field %q", token)
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (r *Responses) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
+		return err
+	}
+	if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) {
+		r.ResponsesProps = ResponsesProps{}
+	}
+	return nil
+}
+
+// MarshalJSON converts this items object to JSON
+func (r Responses) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(r.ResponsesProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(r.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b1, b2)
+	return concated, nil
+}
+
+// ResponsesProps describes all responses for an operation.
+// It holds the default response and maps all other responses to
+// their HTTP status codes.
+type ResponsesProps struct {
+	Default             *Response
+	StatusCodeResponses map[int]Response
+}
+
+// MarshalJSON marshals responses as JSON
+func (r ResponsesProps) MarshalJSON() ([]byte, error) {
+	toser := map[string]Response{}
+	if r.Default != nil {
+		toser["default"] = *r.Default
+	}
+	for k, v := range r.StatusCodeResponses {
+		toser[strconv.Itoa(k)] = v
+	}
+	return json.Marshal(toser)
+}
+
+// UnmarshalJSON unmarshals responses from JSON
+func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
+	var res map[string]Response
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+	if v, ok := res["default"]; ok {
+		r.Default = &v
+		delete(res, "default")
+	}
+	for k, v := range res {
+		if nk, err := strconv.Atoi(k); err == nil {
+			if r.StatusCodeResponses == nil {
+				r.StatusCodeResponses = map[int]Response{}
+			}
+			r.StatusCodeResponses[nk] = v
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go
new file mode 100644
index 00000000000..4e9be8576bb
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema.go
@@ -0,0 +1,645 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
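
`RespondsWith` (operation.go above) and the `Responses` serializer cooperate so that integer status codes become JSON object keys and code 0 maps to `default`. A minimal sketch, assuming `spec.NewOperation` from operation.go (not shown in this hunk) and invented names:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	op := spec.NewOperation("getItem").
		RespondsWith(200, spec.NewResponse().
			WithDescription("resource found").
			WithSchema(spec.RefSchema("#/definitions/item"))).
		RespondsWith(0, spec.NewResponse().
			WithDescription("unexpected error")) // code 0 becomes "default"

	out, err := json.MarshalIndent(op.Responses, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // keys: "200" and "default"
}
```
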
+ +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]interface{}{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON 
unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := parseURL(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties SchemaProperties `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
+// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + ExtraProps map[string]interface{} `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(max int64) *Schema { + s.MaxProperties = &max + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(min int64) *Schema { + s.MinProperties = &min + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = format + } + return s +} + +// AsNullable flags this schema as nullable. +func (s *Schema) AsNullable() *Schema { + s.Nullable = true + return s +} + +// CollectionOf a fluent builder method for an array parameter +func (s *Schema) CollectionOf(items Schema) *Schema { + s.Type = []string{jsonArray} + s.Items = &SchemaOrArray{Schema: &items} + return s +} + +// WithDefault sets the default value on this parameter +func (s *Schema) WithDefault(defaultValue interface{}) *Schema { + s.Default = defaultValue + return s +} + +// WithRequired flags this parameter as required +func (s *Schema) WithRequired(items ...string) *Schema { + s.Required = items + return s +} + +// AddRequired adds field names to the required properties array +func (s *Schema) AddRequired(items ...string) *Schema { + s.Required = append(s.Required, items...) 
+ return s +} + +// WithMaxLength sets a max length value +func (s *Schema) WithMaxLength(max int64) *Schema { + s.MaxLength = &max + return s +} + +// WithMinLength sets a min length value +func (s *Schema) WithMinLength(min int64) *Schema { + s.MinLength = &min + return s +} + +// WithPattern sets a pattern value +func (s *Schema) WithPattern(pattern string) *Schema { + s.Pattern = pattern + return s +} + +// WithMultipleOf sets a multiple of value +func (s *Schema) WithMultipleOf(number float64) *Schema { + s.MultipleOf = &number + return s +} + +// WithMaximum sets a maximum number value +func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { + s.Maximum = &max + s.ExclusiveMaximum = exclusive + return s +} + +// WithMinimum sets a minimum number value +func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { + s.Minimum = &min + s.ExclusiveMinimum = exclusive + return s +} + +// WithEnum sets a the enum values (replace) +func (s *Schema) WithEnum(values ...interface{}) *Schema { + s.Enum = append([]interface{}{}, values...) + return s +} + +// WithMaxItems sets the max items +func (s *Schema) WithMaxItems(size int64) *Schema { + s.MaxItems = &size + return s +} + +// WithMinItems sets the min items +func (s *Schema) WithMinItems(size int64) *Schema { + s.MinItems = &size + return s +} + +// UniqueValues dictates that this array can only have unique items +func (s *Schema) UniqueValues() *Schema { + s.UniqueItems = true + return s +} + +// AllowDuplicates this array can have duplicates +func (s *Schema) AllowDuplicates() *Schema { + s.UniqueItems = false + return s +} + +// AddToAllOf adds a schema to the allOf property +func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { + s.AllOf = append(s.AllOf, schemas...) + return s +} + +// WithDiscriminator sets the name of the discriminator field +func (s *Schema) WithDiscriminator(discriminator string) *Schema { + s.Discriminator = discriminator + return s +} + +// AsReadOnly flags this schema as readonly +func (s *Schema) AsReadOnly() *Schema { + s.ReadOnly = true + return s +} + +// AsWritable flags this schema as writeable (not read-only) +func (s *Schema) AsWritable() *Schema { + s.ReadOnly = false + return s +} + +// WithExample sets the example for this schema +func (s *Schema) WithExample(example interface{}) *Schema { + s.Example = example + return s +} + +// WithExternalDocs sets/removes the external docs for/from this schema. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. 
+func (s *Schema) WithExternalDocs(description, url string) *Schema { + if description == "" && url == "" { + s.ExternalDocs = nil + return s + } + + if s.ExternalDocs == nil { + s.ExternalDocs = &ExternalDocumentation{} + } + s.ExternalDocs.Description = description + s.ExternalDocs.URL = url + return s +} + +// WithXMLName sets the xml name for the object +func (s *Schema) WithXMLName(name string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Name = name + return s +} + +// WithXMLNamespace sets the xml namespace for the object +func (s *Schema) WithXMLNamespace(namespace string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Namespace = namespace + return s +} + +// WithXMLPrefix sets the xml prefix for the object +func (s *Schema) WithXMLPrefix(prefix string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Prefix = prefix + return s +} + +// AsXMLAttribute flags this object as xml attribute +func (s *Schema) AsXMLAttribute() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = true + return s +} + +// AsXMLElement flags this object as an xml node +func (s *Schema) AsXMLElement() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = false + return s +} + +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +func (s *Schema) AsWrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = true + return s +} + +// AsUnwrappedXML flags this object as an xml node +func (s *Schema) AsUnwrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = false + return s +} + +// SetValidations defines all schema validations. +// +// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered. 
+func (s *Schema) SetValidations(val SchemaValidations) { + s.Maximum = val.Maximum + s.ExclusiveMaximum = val.ExclusiveMaximum + s.Minimum = val.Minimum + s.ExclusiveMinimum = val.ExclusiveMinimum + s.MaxLength = val.MaxLength + s.MinLength = val.MinLength + s.Pattern = val.Pattern + s.MaxItems = val.MaxItems + s.MinItems = val.MinItems + s.UniqueItems = val.UniqueItems + s.MultipleOf = val.MultipleOf + s.Enum = val.Enum + s.MinProperties = val.MinProperties + s.MaxProperties = val.MaxProperties + s.PatternProperties = val.PatternProperties +} + +// WithValidations is a fluent method to set schema validations +func (s *Schema) WithValidations(val SchemaValidations) *Schema { + s.SetValidations(val) + return s +} + +// Validations returns a clone of the validations for this schema +func (s Schema) Validations() SchemaValidations { + return SchemaValidations{ + CommonValidations: CommonValidations{ + Maximum: s.Maximum, + ExclusiveMaximum: s.ExclusiveMaximum, + Minimum: s.Minimum, + ExclusiveMinimum: s.ExclusiveMinimum, + MaxLength: s.MaxLength, + MinLength: s.MinLength, + Pattern: s.Pattern, + MaxItems: s.MaxItems, + MinItems: s.MinItems, + UniqueItems: s.UniqueItems, + MultipleOf: s.MultipleOf, + Enum: s.Enum, + }, + MinProperties: s.MinProperties, + MaxProperties: s.MaxProperties, + PatternProperties: s.PatternProperties, + } +} + +// MarshalJSON marshal this to JSON +func (s Schema) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SchemaProps) + if err != nil { + return nil, fmt.Errorf("schema props %v", err) + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, fmt.Errorf("vendor props %v", err) + } + b3, err := s.Ref.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("ref prop %v", err) + } + b4, err := s.Schema.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("schema prop %v", err) + } + b5, err := json.Marshal(s.SwaggerSchemaProps) + if err != nil { + return nil, fmt.Errorf("common validations %v", err) + } + var b6 []byte + if s.ExtraProps != nil { + jj, err := json.Marshal(s.ExtraProps) + if err != nil { + return nil, fmt.Errorf("extra props %v", err) + } + b6 = jj + } + return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *Schema) UnmarshalJSON(data []byte) error { + props := struct { + SchemaProps + SwaggerSchemaProps + }{} + if err := json.Unmarshal(data, &props); err != nil { + return err + } + + sch := Schema{ + SchemaProps: props.SchemaProps, + SwaggerSchemaProps: props.SwaggerSchemaProps, + } + + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + _ = sch.Ref.fromMap(d) + _ = sch.Schema.fromMap(d) + + delete(d, "$ref") + delete(d, "$schema") + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(d, pn) + } + + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if sch.Extensions == nil { + sch.Extensions = map[string]interface{}{} + } + sch.Extensions[k] = vv + continue + } + if sch.ExtraProps == nil { + sch.ExtraProps = map[string]interface{}{} + } + sch.ExtraProps[k] = vv + } + + *s = sch + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go new file mode 100644 index 00000000000..b81175afdf4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -0,0 +1,338 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader is a function to use when loading remote refs. +// +// This is a package level default. It may be overridden or bypassed by +// specifying the loader in ExpandOptions. +// +// NOTE: if you are using the go-openapi/loads package, it will override +// this value with its own default (a loader to retrieve YAML documents as +// well as JSON ones). +var PathLoader = func(pth string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(pth) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, to shortcircuit $ref resolution. + // + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. + circulars map[string]bool + basePath string + loadDoc func(string) (json.RawMessage, error) + rootID string +} + +func newResolverContext(options *ExpandOptions) *resolverContext { + expandOptions := optionsOrDefault(options) + + // path loader may be overridden by options + var loader func(string) (json.RawMessage, error) + if expandOptions.PathLoader == nil { + loader = PathLoader + } else { + loader = expandOptions.PathLoader + } + + return &resolverContext{ + circulars: make(map[string]bool), + basePath: expandOptions.RelativeBase, // keep the root base path in context + loadDoc: loader, + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader { + if ref.IsRoot() || ref.HasFragmentOnly { + return r + } + + baseRef := MustCreateRef(basePath) + currentRef := normalizeRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r + } + + // set a new root against which to resolve + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + + return defaultSchemaLoader(root, newOptions, r.cache, r.context) +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + if transitive.options != nil && transitive.options.RelativeBase != "" { + return normalizeBase(transitive.options.RelativeBase) + } + } + + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return ErrResolveRefNeedsAPointer + } + + if ref.GetURL() == nil 
{ + return nil + } + + var ( + res interface{} + data interface{} + err error + ) + + // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means + // it is pointing somewhere in the root. + root := r.root + if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { + if baseRef, erb := NewRef(basePath); erb == nil { + root, _, _, _ = r.load(baseRef.GetURL()) + } + } + + if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { + data = root + } else { + baseRef := normalizeRef(ref, basePath) + data, _, _, err = r.load(baseRef.GetURL()) + if err != nil { + return err + } + } + + res = data + if ref.String() != "" { + res, _, err = ref.GetPointer().Get(data) + if err != nil { + return err + } + } + return swag.DynamicJSONToStruct(res, target) +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) + toFetch := *refURL + toFetch.Fragment = "" + + var err error + pth := toFetch.String() + normalized := normalizeBase(pth) + debugLog("loading doc from: %s", normalized) + + unescaped, err := url.PathUnescape(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + u := url.URL{Path: unescaped} + + data, fromCache := r.cache.Get(u.RequestURI()) + if fromCache { + return data, toFetch, fromCache, nil + } + + b, err := r.context.loadDoc(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + var doc interface{} + if err := json.Unmarshal(b, &doc); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(normalized, doc) + + return doc, toFetch, fromCache, nil +} + +// isCircular detects cycles in sequences of $ref. +// +// It relies on a private context (which needs not be locked). +func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizeURI(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = swag.ContainsStrings(parentRefs, normalizedRef) // normalized windows url's are lower cased + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +// Resolve resolves a reference against basePath and stores the result in target. +// +// Resolve is not in charge of following references: it only resolves ref by following its URL. +// +// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them. +// +// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { + return r.resolveRef(ref, target, basePath) +} + +func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType) + } + + curRef := ref.String() + if curRef == "" { + return nil + } + + normalizedRef := normalizeRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) 
{ + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + if ref.String() == "" || ref.String() == curRef { + // done with rereferencing + return nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func (r *schemaLoader) setSchemaID(target interface{}, id, basePath string) (string, string) { + debugLog("schema has ID: %s", id) + + // handling the case when id is a folder + // remember that basePath has to point to a file + var refPath string + if strings.HasSuffix(id, "/") { + // ensure this is detected as a file, not a folder + refPath = fmt.Sprintf("%s%s", id, "placeholder.json") + } else { + refPath = id + } + + // updates the current base path + // * important: ID can be a relative path + // * registers target to be fetchable from the new base proposed by this id + newBasePath := normalizeURI(refPath, basePath) + + // store found IDs for possible future reuse in $ref + r.cache.Set(newBasePath, target) + + // the root document has an ID: all $ref relative to that ID may + // be rebased relative to the root document + if basePath == r.context.basePath { + debugLog("root document is a schema with ID: %s (normalized as:%s)", id, newBasePath) + r.context.rootID = newBasePath + } + + return newBasePath, refPath +} + +func defaultSchemaLoader( + root interface{}, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) *schemaLoader { + + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + + cache = cacheOrDefault(cache) + + if expandOptions.RelativeBase == "" { + // if no relative base is provided, assume the root document + // contains all $ref, or at least, that the relative documents + // may be resolved from the current working directory. + expandOptions.RelativeBase = baseForRoot(root, cache) + } + debugLog("effective expander options: %#v", expandOptions) + + if context == nil { + context = newResolverContext(expandOptions) + } + + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + } +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 00000000000..9d0bdae9081 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,170 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
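+// +// A minimal sketch, using only the constructors defined above (the URLs and +// scope name are placeholders): +// +//	scheme := OAuth2AccessToken("https://example.org/authorize", "https://example.org/token") +//	scheme.AddScope("read", "grants read access")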
+// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) + return r, err +} + +// MarshalJSON marshals this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") { + // for oauth2 with the implicit or accessCode flow, an empty AuthorizationURL is marshaled as an empty string + b1, err = json.Marshal(s.SecuritySchemeProps) + } else { + // when not oauth2, an empty AuthorizationURL should be omitted + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 + }{ + Description: s.Description, + Type: s.Type, + Name: s.Name, + In: s.In, + Flow: s.Flow, + AuthorizationURL: s.AuthorizationURL, + TokenURL: s.TokenURL, + Scopes: s.Scopes, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + return json.Unmarshal(data, &s.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go new file mode 100644 index 00000000000..7d38b6e6251 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -0,0 +1,78 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" +) + +//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json +//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema +//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/...
+//go:generate perl -pi -e s,Json,JSON,g bindata.go + +const ( + // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs + SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" + // JSONSchemaURL the url for the json schema schema + JSONSchemaURL = "http://json-schema.org/draft-04/schema#" +) + +// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error +func MustLoadJSONSchemaDraft04() *Schema { + d, e := JSONSchemaDraft04() + if e != nil { + panic(e) + } + return d +} + +// JSONSchemaDraft04 loads the json schema document for json schema draft04 +func JSONSchemaDraft04() (*Schema, error) { + b, err := Asset("jsonschema-draft-04.json") + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} + +// MustLoadSwagger20Schema panics when Swagger20Schema returns an error +func MustLoadSwagger20Schema() *Schema { + d, e := Swagger20Schema() + if e != nil { + panic(e) + } + return d +} + +// Swagger20Schema loads the swagger 2.0 schema from the embedded assets +func Swagger20Schema() (*Schema, error) { + + b, err := Asset("v2/schema.json") + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go new file mode 100644 index 00000000000..44722ffd5ad --- /dev/null +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -0,0 +1,448 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + "strconv" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Swagger this is the root document object for the API specification. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) +// together into one document.
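+// +// A minimal sketch of reading a spec into this type (specRaw is a +// placeholder for raw spec JSON bytes): +// +//	var doc Swagger +//	if err := json.Unmarshal(specRaw, &doc); err != nil { +//		// handle the error +//	}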
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) 
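+ // note: the SecurityIsEmpty flag set above lets GobDecode distinguish + // this empty (but non-nil) security requirement from a nil one, which + // gob alone cannot do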
+ return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) >= 4 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + 
} + var nw SchemaOrStringArray + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 00000000000..faa3d3de1eb --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,75 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// TagProps describe a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} +} + +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
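+// +// A minimal sketch, using the constructor above (names are placeholders): +// +//	t := NewTag("pets", "Operations about pets", nil)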
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (t Tag) JSONLookup(token string) (interface{}, error) { + if ex, ok := t.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(t.TagProps, token) + return r, err +} + +// MarshalJSON marshals this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/url_go18.go b/vendor/github.com/go-openapi/spec/url_go18.go new file mode 100644 index 00000000000..60b78515363 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/url_go18.go @@ -0,0 +1,8 @@ +//go:build !go1.19 +// +build !go1.19 + +package spec + +import "net/url" + +var parseURL = url.Parse diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go new file mode 100644 index 00000000000..392e3e6395b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/url_go19.go @@ -0,0 +1,14 @@ +//go:build go1.19 +// +build go1.19 + +package spec + +import "net/url" + +func parseURL(s string) (*url.URL, error) { + u, err := url.Parse(s) + if err == nil { + u.OmitHost = false + } + return u, err +} diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go new file mode 100644 index 00000000000..6360a8ea774 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/validations.go @@ -0,0 +1,215 @@ +package spec + +// CommonValidations describe common JSON-schema validations +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + +// SetValidations defines all validations for a simple schema. +// +// NOTE: the input is the larger set of validations available for schemas. +// For simple schemas, MinProperties and MaxProperties are ignored.
+func (v *CommonValidations) SetValidations(val SchemaValidations) { + v.Maximum = val.Maximum + v.ExclusiveMaximum = val.ExclusiveMaximum + v.Minimum = val.Minimum + v.ExclusiveMinimum = val.ExclusiveMinimum + v.MaxLength = val.MaxLength + v.MinLength = val.MinLength + v.Pattern = val.Pattern + v.MaxItems = val.MaxItems + v.MinItems = val.MinItems + v.UniqueItems = val.UniqueItems + v.MultipleOf = val.MultipleOf + v.Enum = val.Enum +} + +type clearedValidation struct { + Validation string + Value interface{} +} + +type clearedValidations []clearedValidation + +func (c clearedValidations) apply(cbs []func(string, interface{})) { + for _, cb := range cbs { + for _, cleared := range c { + cb(cleared.Validation, cleared.Value) + } + } +} + +// ClearNumberValidations clears all number validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 5) + defer func() { + done.apply(cbs) + }() + + if v.Minimum != nil { + done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) + v.Minimum = nil + } + if v.Maximum != nil { + done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) + v.Maximum = nil + } + if v.ExclusiveMaximum { + done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) + v.ExclusiveMaximum = false + } + if v.ExclusiveMinimum { + done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) + v.ExclusiveMinimum = false + } + if v.MultipleOf != nil { + done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) + v.MultipleOf = nil + } +} + +// ClearStringValidations clears all string validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearStringValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.Pattern != "" { + done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) + v.Pattern = "" + } + if v.MinLength != nil { + done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) + v.MinLength = nil + } + if v.MaxLength != nil { + done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) + v.MaxLength = nil + } +} + +// ClearArrayValidations clears all array validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.MaxItems != nil { + done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) + v.MaxItems = nil + } + if v.MinItems != nil { + done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) + v.MinItems = nil + } + if v.UniqueItems { + done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) + v.UniqueItems = false + } +} + +// Validations returns a clone of the validations for a simple schema. +// +// NOTE: in the context of simple schema objects, MinProperties, MaxProperties +// and PatternProperties remain unset. 
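+// +// A minimal sketch (maxLen is a placeholder value): +// +//	maxLen := int64(255) +//	v := CommonValidations{MaxLength: &maxLen} +//	sv := v.Validations() // sv.MaxLength is set; MinProperties and MaxProperties remain nil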
+func (v CommonValidations) Validations() SchemaValidations { + return SchemaValidations{ + CommonValidations: v, + } +} + +// HasNumberValidations indicates if the validations are for numbers or integers +func (v CommonValidations) HasNumberValidations() bool { + return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil +} + +// HasStringValidations indicates if the validations are for strings +func (v CommonValidations) HasStringValidations() bool { + return v.MaxLength != nil || v.MinLength != nil || v.Pattern != "" +} + +// HasArrayValidations indicates if the validations are for arrays +func (v CommonValidations) HasArrayValidations() bool { + return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems +} + +// HasEnum indicates if the validation includes some enum constraint +func (v CommonValidations) HasEnum() bool { + return len(v.Enum) > 0 +} + +// SchemaValidations describes the validation properties of a schema +// +// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change +// in the exported members: all initializers using literals would fail. +type SchemaValidations struct { + CommonValidations + + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` +} + +// HasObjectValidations indicates if the validations are for objects +func (v SchemaValidations) HasObjectValidations() bool { + return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil +} + +// SetValidations for schema validations +func (v *SchemaValidations) SetValidations(val SchemaValidations) { + v.CommonValidations.SetValidations(val) + v.PatternProperties = val.PatternProperties + v.MaxProperties = val.MaxProperties + v.MinProperties = val.MinProperties +} + +// Validations for a schema +func (v SchemaValidations) Validations() SchemaValidations { + val := v.CommonValidations.Validations() + val.PatternProperties = v.PatternProperties + val.MinProperties = v.MinProperties + val.MaxProperties = v.MaxProperties + return val +} + +// ClearObjectValidations clears all object validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.MaxProperties != nil { + done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties}) + v.MaxProperties = nil + } + if v.MinProperties != nil { + done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties}) + v.MinProperties = nil + } + if v.PatternProperties != nil { + done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties}) + v.PatternProperties = nil + } +} diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go new file mode 100644 index 00000000000..945a46703d5 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/xml_object.go @@ -0,0 +1,68 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// XMLObject a metadata object that allows for more fine-tuned XML model definitions. +// +// For more information: http://goo.gl/8us55a#xmlObject +type XMLObject struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Prefix string `json:"prefix,omitempty"` + Attribute bool `json:"attribute,omitempty"` + Wrapped bool `json:"wrapped,omitempty"` +} + +// WithName sets the xml name for the object +func (x *XMLObject) WithName(name string) *XMLObject { + x.Name = name + return x +} + +// WithNamespace sets the xml namespace for the object +func (x *XMLObject) WithNamespace(namespace string) *XMLObject { + x.Namespace = namespace + return x +} + +// WithPrefix sets the xml prefix for the object +func (x *XMLObject) WithPrefix(prefix string) *XMLObject { + x.Prefix = prefix + return x +} + +// AsAttribute flags this object as xml attribute +func (x *XMLObject) AsAttribute() *XMLObject { + x.Attribute = true + return x +} + +// AsElement flags this object as an xml node +func (x *XMLObject) AsElement() *XMLObject { + x.Attribute = false + return x +} + +// AsWrapped flags this object as wrapped, this is mostly useful for array types +func (x *XMLObject) AsWrapped() *XMLObject { + x.Wrapped = true + return x +} + +// AsUnwrapped flags this object as an xml node +func (x *XMLObject) AsUnwrapped() *XMLObject { + x.Wrapped = false + return x +} diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.gitattributes b/vendor/github.com/go-openapi/strfmt/.gitattributes new file mode 100644 index 00000000000..d020be8ea4e --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore new file mode 100644 index 00000000000..dd91ed6a04e --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml new file mode 100644 index 00000000000..d36b25665c4 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -0,0 +1,50 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: 
+ min-complexity: 31 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoinits + - gochecknoglobals + - godox + - gocognit + - whitespace + - wsl + - funlen + - wrapcheck + - testpackage + - nlreturn + - gofumpt + - goerr113 + - gci + - gomnd + - godot + - exhaustivestruct + - paralleltest + - varnamelen + - ireturn + - exhaustruct + #- thelper + +issues: + exclude-rules: + - path: bson.go + text: "should be .*ObjectID" + linters: + - golint + - stylecheck + diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. 
The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md new file mode 100644 index 00000000000..0cf89d77661 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/README.md @@ -0,0 +1,88 @@ +# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) + +This package exposes a registry of data types to support string formats in the go-openapi toolkit. + +strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those. + +## Supported data formats +go-openapi/strfmt follows the swagger 2.0 specification with the following formats +defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types). + +It also provides convenient extensions to go-openapi users. + +- [x] JSON-schema draft 4 formats + - date-time + - email + - hostname + - ipv4 + - ipv6 + - uri +- [x] swagger 2.0 format extensions + - binary + - byte (e.g. base64 encoded string) + - date (e.g. "1970-01-01") + - password +- [x] go-openapi custom format extensions + - bsonobjectid (BSON objectID) + - creditcard + - duration (e.g. "3 weeks", "1ms") + - hexcolor (e.g. "#FFFFFF") + - isbn, isbn10, isbn13 + - mac (e.g "01:02:03:04:05:06") + - rgbcolor (e.g. "rgb(100,100,100)") + - ssn + - uuid, uuid3, uuid4, uuid5 + - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32") + - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec)) + +> NOTE: as the name stands for, this package is intended to support string formatting only. 
+> It does not provide validation for numerical values with swagger format extension for JSON types "number" or +> "integer" (e.g. float, double, int32...). + +## Type conversion + +All types defined here are stringers and may be converted to strings with `.String()`. +Note that most types defined by this package may be converted directly to string like `string(Email{})`. + +`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(DateTime{})`. +Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})`. + +## Using pointers + +The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does +with primitive types. + +## Format types +Types defined in strfmt expose marshaling and validation capabilities. + +List of defined types: +- Base64 +- CreditCard +- Date +- DateTime +- Duration +- Email +- HexColor +- Hostname +- IPv4 +- IPv6 +- CIDR +- ISBN +- ISBN10 +- ISBN13 +- MAC +- ObjectId +- Password +- RGBColor +- SSN +- URI +- UUID +- UUID3 +- UUID4 +- UUID5 +- [ULID](https://github.com/ulid/spec) diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go new file mode 100644 index 00000000000..8740b150599 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -0,0 +1,165 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
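+// +// A minimal sketch, using only the helpers defined in this file (the hex +// string is a placeholder): +// +//	if IsBSONObjectID("5c8f0fa4b0d1e23a4c112233") { +//		id := NewObjectId("5c8f0fa4b0d1e23a4c112233") +//		_ = id.String() +//	}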
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go
new file mode 100644
index 00000000000..8740b150599
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/bson.go
@@ -0,0 +1,165 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	bsonprim "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+func init() {
+	var id ObjectId
+	// register this format in the default registry
+	Default.Add("bsonobjectid", &id, IsBSONObjectID)
+}
+
+// IsBSONObjectID returns true when the string is a valid BSON object ID
+func IsBSONObjectID(str string) bool {
+	_, err := bsonprim.ObjectIDFromHex(str)
+	return err == nil
+}
+
+// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
+//
+// swagger:strfmt bsonobjectid
+type ObjectId bsonprim.ObjectID //nolint:revive
+
+// NewObjectId creates an ObjectId from a hex string.
+// It panics when the argument is not a valid object ID.
+func NewObjectId(hex string) ObjectId { //nolint:revive
+	oid, err := bsonprim.ObjectIDFromHex(hex)
+	if err != nil {
+		panic(err)
+	}
+	return ObjectId(oid)
+}
+
+// MarshalText turns this instance into text
+func (id ObjectId) MarshalText() ([]byte, error) {
+	oid := bsonprim.ObjectID(id)
+	if oid == bsonprim.NilObjectID {
+		return nil, nil
+	}
+	return []byte(oid.Hex()), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on
+	if len(data) == 0 {
+		*id = ObjectId(bsonprim.NilObjectID)
+		return nil
+	}
+	oidstr := string(data)
+	oid, err := bsonprim.ObjectIDFromHex(oidstr)
+	if err != nil {
+		return err
+	}
+	*id = ObjectId(oid)
+	return nil
+}
+
+// Scan reads a value from a database driver
+func (id *ObjectId) Scan(raw interface{}) error {
+	var data []byte
+	switch v := raw.(type) {
+	case []byte:
+		data = v
+	case string:
+		data = []byte(v)
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.ObjectId from: %#v", v)
+	}
+
+	return id.UnmarshalText(data)
+}
+
+// Value converts a value to a database driver value
+func (id ObjectId) Value() (driver.Value, error) {
+	return driver.Value(bsonprim.ObjectID(id).Hex()), nil
+}
+
+// String renders this object ID as a hex string
+func (id ObjectId) String() string {
+	return bsonprim.ObjectID(id).Hex()
+}
+
+// MarshalJSON returns the ObjectId as JSON
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return bsonprim.ObjectID(id).MarshalJSON()
+}
+
+// UnmarshalJSON sets the ObjectId from JSON
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	var obj bsonprim.ObjectID
+	if err := obj.UnmarshalJSON(data); err != nil {
+		return err
+	}
+	*id = ObjectId(obj)
+	return nil
+}
+
+// MarshalBSON renders the object id as a BSON document
+func (id ObjectId) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)})
+}
+
+// UnmarshalBSON reads the objectId from a BSON document
+func (id *ObjectId) UnmarshalBSON(data []byte) error {
+	var obj struct {
+		Data bsonprim.ObjectID
+	}
+	if err := bson.Unmarshal(data, &obj); err != nil {
+		return err
+	}
+	*id = ObjectId(obj.Data)
+	return nil
+}
+
+// MarshalBSONValue is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	oid := bsonprim.ObjectID(id)
+	return bsontype.ObjectID, oid[:], nil
+}
+
+// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+func (id *ObjectId) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error {
+	var oid bsonprim.ObjectID
+	copy(oid[:], data)
+	*id = ObjectId(oid)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (id *ObjectId) DeepCopyInto(out *ObjectId) {
+	*out = *id
+}
+
+// DeepCopy copies the receiver into a new ObjectId.
+func (id *ObjectId) DeepCopy() *ObjectId {
+	if id == nil {
+		return nil
+	}
+	out := new(ObjectId)
+	id.DeepCopyInto(out)
+	return out
+}
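+
+// Illustrative usage (an editorial sketch, not part of the upstream file):
+//
+//	const hex = "5e8c6d63e3a6f9a7f0a0c0de"
+//	if IsBSONObjectID(hex) {
+//		id := NewObjectId(hex) // NewObjectId panics on malformed input, hence the guard
+//		_ = id.String()        // "5e8c6d63e3a6f9a7f0a0c0de"
+//	}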
diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go
new file mode 100644
index 00000000000..f0b310964d9
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/date.go
@@ -0,0 +1,187 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+func init() {
+	d := Date{}
+	// register this format in the default registry
+	Default.Add("date", &d, IsDate)
+}
+
+// IsDate returns true when the string is a valid date
+func IsDate(str string) bool {
+	_, err := time.Parse(RFC3339FullDate, str)
+	return err == nil
+}
+
+const (
+	// RFC3339FullDate represents a full-date as specified by RFC3339
+	// See: http://goo.gl/xXOvVd
+	RFC3339FullDate = "2006-01-02"
+)
+
+// Date represents a date from the API
+//
+// swagger:strfmt date
+type Date time.Time
+
+// String converts this date into a string
+func (d Date) String() string {
+	return time.Time(d).Format(RFC3339FullDate)
+}
+
+// UnmarshalText parses a text representation into a date type
+func (d *Date) UnmarshalText(text []byte) error {
+	if len(text) == 0 {
+		return nil
+	}
+	dd, err := time.Parse(RFC3339FullDate, string(text))
+	if err != nil {
+		return err
+	}
+	*d = Date(dd)
+	return nil
+}
+
+// MarshalText serializes this date type to string
+func (d Date) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// Scan scans a Date value from a database driver type.
+func (d *Date) Scan(raw interface{}) error {
+	switch v := raw.(type) {
+	case []byte:
+		return d.UnmarshalText(v)
+	case string:
+		return d.UnmarshalText([]byte(v))
+	case time.Time:
+		*d = Date(v)
+		return nil
+	case nil:
+		*d = Date{}
+		return nil
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v)
+	}
+}
+
+// Value converts Date to a primitive value ready to be written to a database.
+func (d Date) Value() (driver.Value, error) {
+	return driver.Value(d.String()), nil
+}
+
+// MarshalJSON returns the Date as JSON
+func (d Date) MarshalJSON() ([]byte, error) {
+	return json.Marshal(time.Time(d).Format(RFC3339FullDate))
+}
+
+// UnmarshalJSON sets the Date from JSON
+func (d *Date) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull {
+		return nil
+	}
+	var strdate string
+	if err := json.Unmarshal(data, &strdate); err != nil {
+		return err
+	}
+	tt, err := time.Parse(RFC3339FullDate, strdate)
+	if err != nil {
+		return err
+	}
+	*d = Date(tt)
+	return nil
+}
+
+// MarshalBSON renders this date as a BSON document
+func (d Date) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(bson.M{"data": d.String()})
+}
+
+// UnmarshalBSON reads this date from a BSON document
+func (d *Date) UnmarshalBSON(data []byte) error {
+	var m bson.M
+	if err := bson.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	if data, ok := m["data"].(string); ok {
+		rd, err := time.Parse(RFC3339FullDate, data)
+		if err != nil {
+			return err
+		}
+		*d = Date(rd)
+		return nil
+	}
+
+	return errors.New("couldn't unmarshal bson bytes value as Date")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (d *Date) DeepCopyInto(out *Date) {
+	*out = *d
+}
+
+// DeepCopy copies the receiver into a new Date.
+func (d *Date) DeepCopy() *Date {
+	if d == nil {
+		return nil
+	}
+	out := new(Date)
+	d.DeepCopyInto(out)
+	return out
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (d Date) GobEncode() ([]byte, error) {
+	return d.MarshalBinary()
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (d *Date) GobDecode(data []byte) error {
+	return d.UnmarshalBinary(data)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d Date) MarshalBinary() ([]byte, error) {
+	return time.Time(d).MarshalBinary()
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Date) UnmarshalBinary(data []byte) error {
+	var original time.Time
+
+	err := original.UnmarshalBinary(data)
+	if err != nil {
+		return err
+	}
+
+	*d = Date(original)
+
+	return nil
+}
+
+// Equal checks if two Date instances are equal
+func (d Date) Equal(d2 Date) bool {
+	return time.Time(d).Equal(time.Time(d2))
+}
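+
+// Illustrative usage (an editorial sketch, not part of the upstream file):
+//
+//	var d Date
+//	_ = d.UnmarshalText([]byte("2012-03-04")) // parses an RFC 3339 full-date
+//	ok := d.Equal(Date(time.Date(2012, 3, 4, 0, 0, 0, 0, time.UTC)))
+//	_ = ok // true: Equal compares the underlying time.Time values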
diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go
new file mode 100644
index 00000000000..a89a4de3f38
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/default.go
@@ -0,0 +1,2035 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/mail"
+	"regexp"
+	"strings"
+
+	"github.com/asaskevich/govalidator"
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+const (
+	// HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114
+	//  A string instance is valid against this attribute if it is a valid
+	//  representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
+	//  http://tools.ietf.org/html/rfc1034#section-3.5
+	//  <digit> ::= any one of the ten digits 0 through 9
+	//  var digit = /[0-9]/;
+	//  <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case
+	//  var letter = /[a-zA-Z]/;
+	//  <let-dig> ::= <letter> | <digit>
+	//  var letDig = /[0-9a-zA-Z]/;
+	//  <let-dig-hyp> ::= <let-dig> | "-"
+	//  var letDigHyp = /[-0-9a-zA-Z]/;
+	//  <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
+	//  var ldhStr = /[-0-9a-zA-Z]+/;
+	//